aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2017-05-08 17:12:57 +0000
committerDimitry Andric <dim@FreeBSD.org>2017-05-08 17:12:57 +0000
commitc46e6a5940c50058e00c0c5f9123fd82e338d29a (patch)
tree89a719d723035c54a190b1f81d329834f1f93336
parent148779df305667b6942fee7e758fdf81a6498f38 (diff)
downloadsrc-c46e6a5940c50058e00c0c5f9123fd82e338d29a.tar.gz
src-c46e6a5940c50058e00c0c5f9123fd82e338d29a.zip
Vendor import of llvm trunk r302418:vendor/llvm/llvm-trunk-r302418
Notes
Notes: svn path=/vendor/llvm/dist/; revision=317948 svn path=/vendor/llvm/llvm-trunk-r302418/; revision=317950; tag=vendor/llvm/llvm-trunk-r302418
-rw-r--r--docs/Lexicon.rst7
-rw-r--r--docs/MIRLangRef.rst2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp2
-rw-r--r--examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp2
-rw-r--r--examples/Kaleidoscope/Chapter6/toy.cpp2
-rw-r--r--examples/Kaleidoscope/Chapter7/toy.cpp2
-rw-r--r--examples/Kaleidoscope/Chapter8/toy.cpp2
-rw-r--r--include/llvm/ADT/APInt.h11
-rw-r--r--include/llvm/ADT/BitVector.h33
-rw-r--r--include/llvm/ADT/SmallBitVector.h18
-rw-r--r--include/llvm/Analysis/LoopInfoImpl.h16
-rw-r--r--include/llvm/Analysis/ProfileSummaryInfo.h12
-rw-r--r--include/llvm/Analysis/ScalarEvolution.h12
-rw-r--r--include/llvm/Analysis/TargetLibraryInfo.def3
-rw-r--r--include/llvm/CodeGen/AsmPrinter.h3
-rw-r--r--include/llvm/CodeGen/FastISel.h1
-rw-r--r--include/llvm/CodeGen/FunctionLoweringInfo.h2
-rw-r--r--include/llvm/CodeGen/GlobalISel/IRTranslator.h2
-rw-r--r--include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h6
-rw-r--r--include/llvm/CodeGen/GlobalISel/RegBankSelect.h10
-rw-r--r--include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h54
-rw-r--r--include/llvm/CodeGen/MIRPrinter.h (renamed from lib/CodeGen/MIRPrinter.h)13
-rw-r--r--include/llvm/CodeGen/MachineFrameInfo.h8
-rw-r--r--include/llvm/CodeGen/MachineModuleInfo.h2
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeDatabase.h2
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFContext.h5
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFFormValue.h331
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h8
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiModuleList.h114
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiStream.h13
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h8
-rw-r--r--include/llvm/DebugInfo/PDB/Native/RawTypes.h11
-rw-r--r--include/llvm/DebugInfo/PDB/Native/TpiStream.h6
-rw-r--r--include/llvm/ExecutionEngine/Orc/RPCSerialization.h4
-rw-r--r--include/llvm/ExecutionEngine/RuntimeDyldChecker.h6
-rw-r--r--include/llvm/IR/Attributes.h15
-rw-r--r--include/llvm/IR/BasicBlock.h13
-rw-r--r--include/llvm/IR/CFG.h54
-rw-r--r--include/llvm/IR/CallSite.h4
-rw-r--r--include/llvm/IR/CallingConv.h12
-rw-r--r--include/llvm/IR/ConstantRange.h7
-rw-r--r--include/llvm/IR/DataLayout.h45
-rw-r--r--include/llvm/IR/DebugInfo.h25
-rw-r--r--include/llvm/IR/Dominators.h14
-rw-r--r--include/llvm/IR/Function.h1
-rw-r--r--include/llvm/IR/InlineAsm.h6
-rw-r--r--include/llvm/IR/InstIterator.h34
-rw-r--r--include/llvm/IR/InstrTypes.h46
-rw-r--r--include/llvm/IR/Intrinsics.td8
-rw-r--r--include/llvm/IR/IntrinsicsARM.td186
-rw-r--r--include/llvm/IR/ModuleSummaryIndex.h152
-rw-r--r--include/llvm/IR/ModuleSummaryIndexYAML.h4
-rw-r--r--include/llvm/MC/ConstantPools.h3
-rw-r--r--include/llvm/Object/COFF.h40
-rw-r--r--include/llvm/Object/Wasm.h6
-rw-r--r--include/llvm/ObjectYAML/WasmYAML.h15
-rw-r--r--include/llvm/Support/AArch64TargetParser.def36
-rw-r--r--include/llvm/Support/BinaryStreamArray.h15
-rw-r--r--include/llvm/Support/COFF.h44
-rw-r--r--include/llvm/Support/KnownBits.h79
-rw-r--r--include/llvm/Support/MathExtras.h12
-rw-r--r--include/llvm/Target/GlobalISel/SelectionDAGCompat.td38
-rw-r--r--include/llvm/Target/Target.td10
-rw-r--r--include/llvm/Target/TargetOpcodes.def4
-rw-r--r--include/llvm/Transforms/Instrumentation.h1
-rw-r--r--include/llvm/Transforms/Scalar/Float2Int.h2
-rw-r--r--lib/Analysis/ConstantFolding.cpp9
-rw-r--r--lib/Analysis/InstructionSimplify.cpp361
-rw-r--r--lib/Analysis/LazyValueInfo.cpp14
-rw-r--r--lib/Analysis/Lint.cpp4
-rw-r--r--lib/Analysis/ModuleSummaryAnalysis.cpp43
-rw-r--r--lib/Analysis/ScalarEvolution.cpp128
-rw-r--r--lib/Analysis/TargetLibraryInfo.cpp4
-rw-r--r--lib/Analysis/ValueTracking.cpp79
-rw-r--r--lib/Bitcode/Reader/BitcodeReader.cpp77
-rw-r--r--lib/Bitcode/Writer/BitcodeWriter.cpp22
-rw-r--r--lib/CodeGen/AsmPrinter/AsmPrinter.cpp52
-rw-r--r--lib/CodeGen/AsmPrinter/CodeViewDebug.cpp12
-rw-r--r--lib/CodeGen/BranchFolding.cpp34
-rw-r--r--lib/CodeGen/GlobalISel/IRTranslator.cpp11
-rw-r--r--lib/CodeGen/GlobalISel/InstructionSelect.cpp7
-rw-r--r--lib/CodeGen/GlobalISel/Legalizer.cpp9
-rw-r--r--lib/CodeGen/GlobalISel/RegBankSelect.cpp28
-rw-r--r--lib/CodeGen/GlobalISel/RegisterBankInfo.cpp68
-rw-r--r--lib/CodeGen/MIRParser/MIParser.cpp42
-rw-r--r--lib/CodeGen/MIRPrinter.cpp73
-rw-r--r--lib/CodeGen/MIRPrintingPass.cpp3
-rw-r--r--lib/CodeGen/MachineFrameInfo.cpp26
-rw-r--r--lib/CodeGen/MachineVerifier.cpp8
-rw-r--r--lib/CodeGen/PrologEpilogInserter.cpp3
-rw-r--r--lib/CodeGen/SelectionDAG/DAGCombiner.cpp21
-rw-r--r--lib/CodeGen/SelectionDAG/FastISel.cpp22
-rw-r--r--lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp3
-rw-r--r--lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp67
-rw-r--r--lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp89
-rw-r--r--lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp57
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAG.cpp62
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp88
-rw-r--r--lib/CodeGen/SelectionDAG/TargetLowering.cpp49
-rw-r--r--lib/CodeGen/XRayInstrumentation.cpp46
-rw-r--r--lib/DebugInfo/CodeView/TypeDatabase.cpp5
-rw-r--r--lib/DebugInfo/DWARF/DWARFContext.cpp39
-rw-r--r--lib/DebugInfo/DWARF/DWARFFormValue.cpp581
-rw-r--r--lib/DebugInfo/PDB/CMakeLists.txt1
-rw-r--r--lib/DebugInfo/PDB/Native/DbiModuleList.cpp273
-rw-r--r--lib/DebugInfo/PDB/Native/DbiStream.cpp112
-rw-r--r--lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp8
-rw-r--r--lib/DebugInfo/PDB/Native/NativeEnumModules.cpp15
-rw-r--r--lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp2
-rw-r--r--lib/DebugInfo/PDB/Native/TpiStream.cpp26
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp14
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h2
-rw-r--r--lib/Fuzzer/FuzzerLoop.cpp2
-rw-r--r--lib/IR/ConstantRange.cpp29
-rw-r--r--lib/IR/DataLayout.cpp27
-rw-r--r--lib/IR/DebugInfo.cpp39
-rw-r--r--lib/IR/Instruction.cpp43
-rw-r--r--lib/IR/ModuleSummaryIndex.cpp12
-rw-r--r--lib/LTO/LTO.cpp9
-rw-r--r--lib/LTO/ThinLTOCodeGenerator.cpp5
-rw-r--r--lib/MC/ConstantPools.cpp11
-rw-r--r--lib/MC/MCParser/AsmParser.cpp42
-rw-r--r--lib/Object/COFFObjectFile.cpp45
-rw-r--r--lib/Object/WasmObjectFile.cpp68
-rw-r--r--lib/ObjectYAML/WasmYAML.cpp12
-rw-r--r--lib/Passes/PassBuilder.cpp4
-rw-r--r--lib/Support/APInt.cpp194
-rw-r--r--lib/Support/TargetParser.cpp6
-rw-r--r--lib/Support/Unix/DynamicLibrary.inc2
-rw-r--r--lib/Support/Unix/Path.inc9
-rw-r--r--lib/Target/AArch64/AArch64.h2
-rw-r--r--lib/Target/AArch64/AArch64.td1
-rw-r--r--lib/Target/AArch64/AArch64AddressTypePromotion.cpp493
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp6
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.td56
-rw-r--r--lib/Target/AArch64/AArch64RegisterBankInfo.cpp60
-rw-r--r--lib/Target/AArch64/AArch64RegisterBankInfo.h7
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp9
-rw-r--r--lib/Target/AArch64/CMakeLists.txt1
-rw-r--r--lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp3
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.cpp2
-rw-r--r--lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp91
-rw-r--r--lib/Target/AMDGPU/AMDGPURegisterBankInfo.h5
-rw-r--r--lib/Target/AMDGPU/SIFrameLowering.cpp3
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.cpp14
-rw-r--r--lib/Target/AMDGPU/SIInsertWaitcnts.cpp10
-rw-r--r--lib/Target/AMDGPU/SIMachineFunctionInfo.cpp8
-rw-r--r--lib/Target/ARM/ARMBaseRegisterInfo.cpp15
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp53
-rw-r--r--lib/Target/ARM/ARMISelLowering.h6
-rw-r--r--lib/Target/ARM/ARMInstrInfo.td215
-rw-r--r--lib/Target/ARM/ARMInstrNEON.td26
-rw-r--r--lib/Target/ARM/ARMInstrThumb2.td233
-rw-r--r--lib/Target/ARM/ARMRegisterBankInfo.cpp14
-rw-r--r--lib/Target/ARM/ARMRegisterBankInfo.h3
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp1
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp2
-rw-r--r--lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonDepIICHVX.td1143
-rw-r--r--lib/Target/Hexagon/HexagonDepIICScalar.td2504
-rw-r--r--lib/Target/Hexagon/HexagonDepITypes.h56
-rw-r--r--lib/Target/Hexagon/HexagonDepITypes.td56
-rw-r--r--lib/Target/Hexagon/HexagonDepInstrFormats.td5327
-rw-r--r--lib/Target/Hexagon/HexagonDepInstrInfo.td6429
-rw-r--r--lib/Target/Hexagon/HexagonDepTimingClasses.h132
-rw-r--r--lib/Target/Hexagon/HexagonIICHVX.td100
-rw-r--r--lib/Target/Hexagon/HexagonIICScalar.td164
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormats.td164
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormatsV4.td63
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormatsV60.td180
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp152
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.h15
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonPatterns.td8
-rw-r--r--lib/Target/Hexagon/HexagonPseudo.td272
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.td16
-rw-r--r--lib/Target/Hexagon/HexagonSchedule.td51
-rw-r--r--lib/Target/Hexagon/HexagonScheduleV4.td213
-rw-r--r--lib/Target/Hexagon/HexagonScheduleV55.td207
-rw-r--r--lib/Target/Hexagon/HexagonScheduleV60.td253
-rw-r--r--lib/Target/Hexagon/HexagonScheduleV62.td112
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.cpp302
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.h10
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp110
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h2
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp8
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp9
-rw-r--r--lib/Target/Hexagon/RDFLiveness.cpp33
-rw-r--r--lib/Target/Hexagon/RDFRegisters.cpp15
-rw-r--r--lib/Target/Hexagon/RDFRegisters.h1
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.cpp35
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.cpp9323
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.td6329
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.cpp3
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp2
-rw-r--r--lib/Target/Sparc/SparcISelLowering.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.cpp8
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParser.cpp94
-rw-r--r--lib/Target/X86/AsmParser/X86Operand.h14
-rw-r--r--lib/Target/X86/X86AsmPrinter.h1
-rw-r--r--lib/Target/X86/X86FrameLowering.cpp4
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp273
-rw-r--r--lib/Target/X86/X86InstrAVX512.td39
-rw-r--r--lib/Target/X86/X86InstrInfo.td6
-rw-r--r--lib/Target/X86/X86InstrSSE.td9
-rw-r--r--lib/Target/X86/X86InstructionSelector.cpp30
-rw-r--r--lib/Target/X86/X86LegalizerInfo.cpp102
-rw-r--r--lib/Target/X86/X86LegalizerInfo.h5
-rw-r--r--lib/Target/X86/X86MCInstLower.cpp80
-rw-r--r--lib/Target/X86/X86OptimizeLEAs.cpp8
-rw-r--r--lib/Target/X86/X86RegisterBankInfo.cpp23
-rw-r--r--lib/Target/X86/X86RegisterBankInfo.h7
-rw-r--r--lib/Target/X86/X86Subtarget.cpp10
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.cpp32
-rw-r--r--lib/Target/XCore/XCoreISelLowering.cpp2
-rw-r--r--lib/Transforms/IPO/ArgumentPromotion.cpp2
-rw-r--r--lib/Transforms/IPO/FunctionImport.cpp97
-rw-r--r--lib/Transforms/IPO/LowerTypeTests.cpp2
-rw-r--r--lib/Transforms/IPO/WholeProgramDevirt.cpp2
-rw-r--r--lib/Transforms/InstCombine/InstCombineAddSub.cpp76
-rw-r--r--lib/Transforms/InstCombine/InstCombineAndOrXor.cpp57
-rw-r--r--lib/Transforms/InstCombine/InstCombineCalls.cpp2
-rw-r--r--lib/Transforms/InstCombine/InstCombineCompares.cpp2
-rw-r--r--lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp21
-rw-r--r--lib/Transforms/InstCombine/InstructionCombining.cpp8
-rw-r--r--lib/Transforms/Instrumentation/IndirectCallPromotion.cpp4
-rw-r--r--lib/Transforms/Instrumentation/SanitizerCoverage.cpp11
-rw-r--r--lib/Transforms/Scalar/Float2Int.cpp10
-rw-r--r--lib/Transforms/Scalar/JumpThreading.cpp32
-rw-r--r--lib/Transforms/Scalar/LoopIdiomRecognize.cpp10
-rw-r--r--lib/Transforms/Scalar/NewGVN.cpp9
-rw-r--r--lib/Transforms/Scalar/TailRecursionElimination.cpp15
-rw-r--r--lib/Transforms/Utils/BuildLibCalls.cpp1
-rw-r--r--lib/Transforms/Utils/SimplifyCFG.cpp2
-rw-r--r--lib/Transforms/Utils/ValueMapper.cpp17
-rw-r--r--lib/Transforms/Vectorize/LoopVectorize.cpp2
-rw-r--r--test/Analysis/CostModel/X86/bitreverse.ll8
-rw-r--r--test/Analysis/CostModel/X86/ctbits-cost.ll40
-rw-r--r--test/Analysis/ScalarEvolution/ZeroStep.ll18
-rw-r--r--test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll9
-rw-r--r--test/CodeGen/AArch64/GlobalISel/debug-insts.ll41
-rw-r--r--test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir7
-rw-r--r--test/CodeGen/AArch64/fadd-combines.ll78
-rw-r--r--test/CodeGen/AArch64/loh.mir1
-rw-r--r--test/CodeGen/AArch64/machine-copy-remove.mir62
-rw-r--r--test/CodeGen/AArch64/machine-sink-zr.mir5
-rw-r--r--test/CodeGen/AArch64/regcoal-physreg.mir2
-rw-r--r--test/CodeGen/AArch64/xray-attribute-instrumentation.ll1
-rw-r--r--test/CodeGen/AArch64/xray-tail-call-sled.ll12
-rw-r--r--test/CodeGen/AMDGPU/detect-dead-lanes.mir6
-rw-r--r--test/CodeGen/AMDGPU/fmuladd.f32.ll8
-rw-r--r--test/CodeGen/AMDGPU/inserted-wait-states.mir20
-rw-r--r--test/CodeGen/AMDGPU/invert-br-undef-vcc.mir3
-rw-r--r--test/CodeGen/AMDGPU/lds-size.ll4
-rw-r--r--test/CodeGen/AMDGPU/liveness.mir2
-rw-r--r--test/CodeGen/AMDGPU/local-stack-slot-bug.ll26
-rw-r--r--test/CodeGen/AMDGPU/optimize-if-exec-masking.mir20
-rw-r--r--test/CodeGen/AMDGPU/rename-independent-subregs.mir1
-rw-r--r--test/CodeGen/AMDGPU/scratch-simple.ll103
-rw-r--r--test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir3
-rw-r--r--test/CodeGen/AMDGPU/subreg-intervals.mir3
-rw-r--r--test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir6
-rw-r--r--test/CodeGen/AMDGPU/waitcnt-looptest.ll146
-rw-r--r--test/CodeGen/ARM/ARMLoadStoreDBG.mir1
-rw-r--r--test/CodeGen/ARM/acle-intrinsics-v5.ll110
-rw-r--r--test/CodeGen/ARM/acle-intrinsics.ll481
-rw-r--r--test/CodeGen/ARM/alloca-align.ll24
-rw-r--r--test/CodeGen/ARM/cmp1-peephole-thumb.mir3
-rw-r--r--test/CodeGen/ARM/cmp2-peephole-thumb.mir5
-rw-r--r--test/CodeGen/ARM/dbg-range-extension.mir5
-rw-r--r--test/CodeGen/ARM/sat-arith.ll63
-rw-r--r--test/CodeGen/ARM/vabs.ll38
-rw-r--r--test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll6
-rw-r--r--test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll7
-rw-r--r--test/CodeGen/BPF/dwarfdump.ll2
-rw-r--r--test/CodeGen/Hexagon/branch-folder-hoist-kills.mir59
-rw-r--r--test/CodeGen/Hexagon/rdf-cover-use.ll38
-rw-r--r--test/CodeGen/Hexagon/swp-matmul-bitext.ll13
-rw-r--r--test/CodeGen/MIR/Generic/branch-probabilities.ll28
-rw-r--r--test/CodeGen/MIR/X86/auto-successor.mir61
-rw-r--r--test/CodeGen/MIR/X86/branch-probabilities.mir18
-rw-r--r--test/CodeGen/MIR/X86/successor-basic-blocks.mir1
-rw-r--r--test/CodeGen/PowerPC/restore-r30.ll30
-rw-r--r--test/CodeGen/SystemZ/copy-physreg-128.ll68
-rw-r--r--test/CodeGen/X86/2014-08-29-CompactUnwind.ll4
-rw-r--r--test/CodeGen/X86/GlobalISel/gep.ll136
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-gep.mir101
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir115
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir111
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir111
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir113
-rw-r--r--test/CodeGen/X86/GlobalISel/mul-scalar.ll39
-rw-r--r--test/CodeGen/X86/GlobalISel/mul-vec.ll84
-rw-r--r--test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir31
-rw-r--r--test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir33
-rw-r--r--test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir75
-rw-r--r--test/CodeGen/X86/GlobalISel/select-gep.mir37
-rw-r--r--test/CodeGen/X86/GlobalISel/select-mul-scalar.mir112
-rw-r--r--test/CodeGen/X86/GlobalISel/select-mul-vec.mir480
-rw-r--r--test/CodeGen/X86/addcarry.ll67
-rw-r--r--test/CodeGen/X86/avx-isa-check.ll5
-rw-r--r--test/CodeGen/X86/avx1-logical-load-folding.ll83
-rw-r--r--test/CodeGen/X86/avx2-schedule.ll338
-rw-r--r--test/CodeGen/X86/avx512vl-arith.ll754
-rw-r--r--test/CodeGen/X86/branchfolding-undef.mir1
-rw-r--r--test/CodeGen/X86/build-vector-128.ll428
-rw-r--r--test/CodeGen/X86/build-vector-256.ll434
-rw-r--r--test/CodeGen/X86/build-vector-512.ll712
-rw-r--r--test/CodeGen/X86/combine-abs.ll46
-rw-r--r--test/CodeGen/X86/commuted-blend-mask.ll5
-rw-r--r--test/CodeGen/X86/ctpop-combine.ll14
-rw-r--r--test/CodeGen/X86/dbg-baseptr.ll15
-rw-r--r--test/CodeGen/X86/eflags-copy-expansion.mir1
-rw-r--r--test/CodeGen/X86/frame-lowering-debug-intrinsic.ll7
-rw-r--r--test/CodeGen/X86/implicit-null-checks.mir32
-rw-r--r--test/CodeGen/X86/invalid-liveness.mir2
-rw-r--r--test/CodeGen/X86/machine-region-info.mir23
-rw-r--r--test/CodeGen/X86/ms-inline-asm-avx512.ll24
-rw-r--r--test/CodeGen/X86/pr27681.mir3
-rw-r--r--test/CodeGen/X86/pr32907.ll54
-rw-r--r--test/CodeGen/X86/pre-coalesce.mir7
-rw-r--r--test/CodeGen/X86/regcall-no-plt.ll44
-rw-r--r--test/CodeGen/X86/shuffle-vs-trunc-512.ll8
-rw-r--r--test/CodeGen/X86/stack-folding-int-avx512.ll12
-rw-r--r--test/CodeGen/X86/vec_partial.ll36
-rw-r--r--test/CodeGen/X86/vec_reassociate.ll160
-rw-r--r--test/CodeGen/X86/vector-lzcnt-512.ll678
-rw-r--r--test/CodeGen/X86/vector-shuffle-variable-128.ll91
-rw-r--r--test/CodeGen/X86/win64_eh_leaf.ll9
-rw-r--r--test/CodeGen/X86/xray-attribute-instrumentation.ll13
-rw-r--r--test/CodeGen/X86/xray-custom-log.ll23
-rw-r--r--test/CodeGen/X86/xray-loop-detection.ll23
-rw-r--r--test/CodeGen/X86/xray-tail-call-sled.ll16
-rw-r--r--test/DebugInfo/COFF/synthetic.ll55
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-decompression-error.elf-x86-64bin0 -> 7096 bytes
-rw-r--r--test/DebugInfo/dwarfdump-decompression-error.test15
-rw-r--r--test/Linker/metadata-global.ll11
-rw-r--r--test/MC/AArch64/basic-a64-instructions.s17
-rw-r--r--test/MC/AArch64/crc.s45
-rw-r--r--test/MC/AArch64/cyclone-crc.s27
-rw-r--r--test/MC/AArch64/directive-arch-negative.s6
-rw-r--r--test/MC/ARM/ltorg-range.s27
-rw-r--r--test/MC/ARM/negative-immediates-fail.s5
-rw-r--r--test/MC/ARM/negative-immediates-thumb1-fail.s5
-rw-r--r--test/MC/ARM/negative-immediates.s16
-rw-r--r--test/MC/AsmParser/altmacro_string.s73
-rw-r--r--test/MC/AsmParser/negative_altmacro_string.s29
-rw-r--r--test/MC/Disassembler/AArch64/basic-a64-instructions.txt17
-rw-r--r--test/ObjectYAML/wasm/name_section.yaml40
-rw-r--r--test/Other/new-pm-defaults.ll2
-rw-r--r--test/Transforms/ArgumentPromotion/pr32917.ll23
-rw-r--r--test/Transforms/CodeExtractor/PartialInlineOptRemark.ll54
-rw-r--r--test/Transforms/Inline/inline-hot-callsite.ll12
-rw-r--r--test/Transforms/Inline/prof-update.ll12
-rw-r--r--test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll14
-rw-r--r--test/Transforms/InstCombine/AddOverFlow.ll76
-rw-r--r--test/Transforms/InstCombine/and-or-icmps.ll4
-rw-r--r--test/Transforms/InstCombine/debuginfo-dce.ll52
-rw-r--r--test/Transforms/InstCombine/demand_shrink_nsw.ll2
-rw-r--r--test/Transforms/InstCombine/or.ll11
-rw-r--r--test/Transforms/InstCombine/strlen-1.ll22
-rw-r--r--test/Transforms/InstSimplify/AndOrXor.ll45
-rw-r--r--test/Transforms/InstSimplify/compare.ll29
-rw-r--r--test/Transforms/InstSimplify/icmp-ranges.ll2726
-rw-r--r--test/Transforms/InstSimplify/shufflevector.ll14
-rw-r--r--test/Transforms/LoopIdiom/unsafe.ll55
-rw-r--r--test/Transforms/LoopRotate/dbgvalue.ll47
-rw-r--r--test/Transforms/SampleProfile/Inputs/indirect-call.prof24
-rw-r--r--test/Transforms/SampleProfile/indirect-call.ll58
-rw-r--r--test/Unit/lit.cfg4
-rw-r--r--test/tools/llvm-objdump/WebAssembly/symbol-table.test8
-rw-r--r--test/tools/llvm-readobj/Inputs/resources/cursor_small.bmpbin0 -> 822 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/resources/okay_small.bmpbin0 -> 822 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/resources/test_resource.obj.coffbin0 -> 3200 bytes
-rw-r--r--test/tools/llvm-readobj/Inputs/resources/test_resource.rc44
-rw-r--r--test/tools/llvm-readobj/Inputs/resources/test_resource.resbin0 -> 2200 bytes
-rw-r--r--test/tools/llvm-readobj/resources.test128
-rw-r--r--tools/llvm-link/llvm-link.cpp2
-rw-r--r--tools/llvm-lto/llvm-lto.cpp2
-rw-r--r--tools/llvm-pdbdump/Analyze.cpp2
-rw-r--r--tools/llvm-pdbdump/LLVMOutputStyle.cpp172
-rw-r--r--tools/llvm-pdbdump/LLVMOutputStyle.h7
-rw-r--r--tools/llvm-pdbdump/StreamUtil.cpp13
-rw-r--r--tools/llvm-pdbdump/YAMLOutputStyle.cpp19
-rw-r--r--tools/llvm-readobj/COFFDumper.cpp111
-rw-r--r--tools/llvm-rtdyld/llvm-rtdyld.cpp23
-rw-r--r--tools/obj2yaml/wasm2yaml.cpp19
-rw-r--r--tools/yaml2obj/yaml2wasm.cpp46
-rw-r--r--unittests/ADT/APIntTest.cpp19
-rw-r--r--unittests/ADT/BitVectorTest.cpp29
-rw-r--r--unittests/Analysis/TargetLibraryInfoTest.cpp1
-rw-r--r--unittests/Support/TargetParserTest.cpp36
-rwxr-xr-xutils/release/test-release.sh10
-rw-r--r--utils/unittest/googletest/README.LLVM2
-rw-r--r--utils/unittest/googletest/include/gtest/internal/gtest-port-arch.h2
-rw-r--r--utils/unittest/googletest/include/gtest/internal/gtest-port.h2
398 files changed, 32647 insertions, 20371 deletions
diff --git a/docs/Lexicon.rst b/docs/Lexicon.rst
index 5d16091e27e5..35687e258182 100644
--- a/docs/Lexicon.rst
+++ b/docs/Lexicon.rst
@@ -38,6 +38,13 @@ B
**BB Vectorization**
Basic-Block Vectorization
+**BDCE**
+ Bit-tracking dead code elimination. Some bit-wise instructions (shifts,
+ ands, ors, etc.) "kill" some of their input bits -- that is, they make it
+ such that those bits can be either zero or one without affecting control or
+ data flow of a program. The BDCE pass removes instructions that only
+ compute these dead bits.
+
**BURS**
Bottom Up Rewriting System --- A method of instruction selection for code
generation. An example is the `BURG
diff --git a/docs/MIRLangRef.rst b/docs/MIRLangRef.rst
index d5e227a2018c..b4ca8f2347a7 100644
--- a/docs/MIRLangRef.rst
+++ b/docs/MIRLangRef.rst
@@ -78,6 +78,8 @@ Simplifying MIR files
The MIR code coming out of ``-stop-after``/``-stop-before`` is very verbose;
Tests are more accessible and future proof when simplified:
+- Use the ``-simplify-mir`` option with llc.
+
- Machine function attributes often have default values or the test works just
as well with default values. Typical candidates for this are: `alignment:`,
`exposesReturnsTwice`, `legalized`, `regBankSelected`, `selected`.
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp
index 945b9706d4d7..163caa6872d7 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter1/toy.cpp
@@ -1092,7 +1092,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
- BinopPrecedence.erase(Proto->getOperatorName());
+ BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp
index 945b9706d4d7..163caa6872d7 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter2/toy.cpp
@@ -1092,7 +1092,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
- BinopPrecedence.erase(Proto->getOperatorName());
+ BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}
diff --git a/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp b/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp
index 945b9706d4d7..163caa6872d7 100644
--- a/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp
+++ b/examples/Kaleidoscope/BuildingAJIT/Chapter3/toy.cpp
@@ -1092,7 +1092,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
- BinopPrecedence.erase(Proto->getOperatorName());
+ BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}
diff --git a/examples/Kaleidoscope/Chapter6/toy.cpp b/examples/Kaleidoscope/Chapter6/toy.cpp
index 1e0ddca29b61..0c2221735589 100644
--- a/examples/Kaleidoscope/Chapter6/toy.cpp
+++ b/examples/Kaleidoscope/Chapter6/toy.cpp
@@ -932,7 +932,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
- BinopPrecedence.erase(Proto->getOperatorName());
+ BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}
diff --git a/examples/Kaleidoscope/Chapter7/toy.cpp b/examples/Kaleidoscope/Chapter7/toy.cpp
index 2f8cb682a847..79ac7b33d7a1 100644
--- a/examples/Kaleidoscope/Chapter7/toy.cpp
+++ b/examples/Kaleidoscope/Chapter7/toy.cpp
@@ -1099,7 +1099,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
- BinopPrecedence.erase(Proto->getOperatorName());
+ BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}
diff --git a/examples/Kaleidoscope/Chapter8/toy.cpp b/examples/Kaleidoscope/Chapter8/toy.cpp
index cdf650973b86..3ed98fcfdb5c 100644
--- a/examples/Kaleidoscope/Chapter8/toy.cpp
+++ b/examples/Kaleidoscope/Chapter8/toy.cpp
@@ -1097,7 +1097,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
- BinopPrecedence.erase(Proto->getOperatorName());
+ BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}
diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h
index 63c92c1a7fce..c3822e35906a 100644
--- a/include/llvm/ADT/APInt.h
+++ b/include/llvm/ADT/APInt.h
@@ -842,6 +842,7 @@ public:
///
/// \returns *this
APInt &operator*=(const APInt &RHS);
+ APInt &operator*=(uint64_t RHS);
/// \brief Addition assignment operator.
///
@@ -2043,6 +2044,16 @@ inline APInt operator-(uint64_t LHS, APInt b) {
return b;
}
+inline APInt operator*(APInt a, uint64_t RHS) {
+ a *= RHS;
+ return a;
+}
+
+inline APInt operator*(uint64_t LHS, APInt b) {
+ b *= LHS;
+ return b;
+}
+
namespace APIntOps {
diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h
index 5aa101591e6e..e835f1516225 100644
--- a/include/llvm/ADT/BitVector.h
+++ b/include/llvm/ADT/BitVector.h
@@ -217,7 +217,7 @@ public:
unsigned BitPos = Prev % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
// Mask off previous bits.
- Copy &= ~0UL << BitPos;
+ Copy &= maskTrailingZeros<BitWord>(BitPos);
if (Copy != 0)
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
@@ -229,7 +229,7 @@ public:
return -1;
}
- /// find_next_unset - Returns the index of the next usnet bit following the
+ /// find_next_unset - Returns the index of the next unset bit following the
/// "Prev" bit. Returns -1 if all remaining bits are set.
int find_next_unset(unsigned Prev) const {
++Prev;
@@ -253,7 +253,34 @@ public:
return -1;
}
- /// clear - Clear all bits.
+ /// find_prev - Returns the index of the first set bit that precedes the
+ /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
+ int find_prev(unsigned PriorTo) {
+ if (PriorTo == 0)
+ return -1;
+
+ --PriorTo;
+
+ unsigned WordPos = PriorTo / BITWORD_SIZE;
+ unsigned BitPos = PriorTo % BITWORD_SIZE;
+ BitWord Copy = Bits[WordPos];
+ // Mask off next bits.
+ Copy &= maskTrailingOnes<BitWord>(BitPos + 1);
+
+ if (Copy != 0)
+ return (WordPos + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
+
+ // Check previous words.
+ for (unsigned i = 1; i <= WordPos; ++i) {
+ unsigned Index = WordPos - i;
+ if (Bits[Index] == 0)
+ continue;
+ return (Index + 1) * BITWORD_SIZE - countLeadingZeros(Bits[Index]) - 1;
+ }
+ return -1;
+ }
+
+ /// clear - Removes all bits from the bitvector. Does not change capacity.
void clear() {
Size = 0;
}
diff --git a/include/llvm/ADT/SmallBitVector.h b/include/llvm/ADT/SmallBitVector.h
index bf16af5933f0..0eeacc162543 100644
--- a/include/llvm/ADT/SmallBitVector.h
+++ b/include/llvm/ADT/SmallBitVector.h
@@ -278,6 +278,24 @@ public:
return getPointer()->find_next_unset(Prev);
}
+ /// find_prev - Returns the index of the first set bit that precedes the
+ /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
+ int find_prev(unsigned PriorTo) const {
+ if (isSmall()) {
+ if (PriorTo == 0)
+ return -1;
+
+ --PriorTo;
+ uintptr_t Bits = getSmallBits();
+ Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
+ if (Bits == 0)
+ return -1;
+
+ return NumBaseBits - countLeadingZeros(Bits) - 1;
+ }
+ return getPointer()->find_prev(PriorTo);
+ }
+
/// Clear all bits.
void clear() {
if (!isSmall())
diff --git a/include/llvm/Analysis/LoopInfoImpl.h b/include/llvm/Analysis/LoopInfoImpl.h
index 66c9f68afc60..249fa572c024 100644
--- a/include/llvm/Analysis/LoopInfoImpl.h
+++ b/include/llvm/Analysis/LoopInfoImpl.h
@@ -220,8 +220,8 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
BI = df_ext_begin(getHeader(), VisitSet),
BE = df_ext_end(getHeader(), VisitSet);
- // Keep track of the number of BBs visited.
- unsigned NumVisited = 0;
+ // Keep track of the BBs visited.
+ SmallPtrSet<BlockT*, 8> VisitedBBs;
// Check the individual blocks.
for ( ; BI != BE; ++BI) {
@@ -259,10 +259,18 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
assert(BB != &getHeader()->getParent()->front() &&
"Loop contains function entry block!");
- NumVisited++;
+ VisitedBBs.insert(BB);
}
- assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
+ if (VisitedBBs.size() != getNumBlocks()) {
+ dbgs() << "The following blocks are unreachable in the loop: ";
+ for (auto BB : Blocks) {
+ if (!VisitedBBs.count(BB)) {
+ dbgs() << *BB << "\n";
+ }
+ }
+ assert(false && "Unreachable block in loop");
+ }
// Check the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
diff --git a/include/llvm/Analysis/ProfileSummaryInfo.h b/include/llvm/Analysis/ProfileSummaryInfo.h
index 1aec35c3e677..75c4cbd03706 100644
--- a/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -54,6 +54,18 @@ public:
ProfileSummaryInfo(Module &M) : M(M) {}
ProfileSummaryInfo(ProfileSummaryInfo &&Arg)
: M(Arg.M), Summary(std::move(Arg.Summary)) {}
+
+ /// Handle the invalidation of this information.
+ ///
+ /// When used as a result of \c ProfileSummaryAnalysis this method will be
+ /// called when the module this was computed for changes. Since profile
+ /// summary is immutable after it is annotated on the module, we return false
+ /// here.
+ bool invalidate(Module &, const PreservedAnalyses &,
+ ModuleAnalysisManager::Invalidator &) {
+ return false;
+ }
+
/// Returns the profile count for \p CallInst.
static Optional<uint64_t> getProfileCount(const Instruction *CallInst,
BlockFrequencyInfo *BFI);
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 54bc4dcfd2cd..85350fa159d6 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -782,13 +782,13 @@ private:
/// Set the memoized range for the given SCEV.
const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
- const ConstantRange &CR) {
+ ConstantRange &&CR) {
DenseMap<const SCEV *, ConstantRange> &Cache =
Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
- auto Pair = Cache.insert({S, CR});
+ auto Pair = Cache.try_emplace(S, std::move(CR));
if (!Pair.second)
- Pair.first->second = CR;
+ Pair.first->second = std::move(CR);
return Pair.first->second;
}
@@ -816,6 +816,10 @@ private:
/// Helper function called from createNodeForPHI.
const SCEV *createAddRecFromPHI(PHINode *PN);
+ /// A helper function for createAddRecFromPHI to handle simple cases.
+ const SCEV *createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
+ Value *StartValueV);
+
/// Helper function called from createNodeForPHI.
const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
@@ -1565,7 +1569,7 @@ public:
/// delinearization).
void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize) const;
+ const SCEV *ElementSize);
void print(raw_ostream &OS) const;
void verify() const;
diff --git a/include/llvm/Analysis/TargetLibraryInfo.def b/include/llvm/Analysis/TargetLibraryInfo.def
index 637fc7ed30dd..099a3c7cf2ac 100644
--- a/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/include/llvm/Analysis/TargetLibraryInfo.def
@@ -1115,6 +1115,9 @@ TLI_DEFINE_STRING_INTERNAL("vsprintf")
/// int vsscanf(const char *s, const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vsscanf)
TLI_DEFINE_STRING_INTERNAL("vsscanf")
+/// size_t wcslen (const wchar_t* wcs);
+TLI_DEFINE_ENUM_INTERNAL(wcslen)
+TLI_DEFINE_STRING_INTERNAL("wcslen")
/// ssize_t write(int fildes, const void *buf, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(write)
TLI_DEFINE_STRING_INTERNAL("write")
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index fb8c8408fc77..180c0b579248 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -226,6 +226,7 @@ public:
FUNCTION_EXIT = 1,
TAIL_CALL = 2,
LOG_ARGS_ENTER = 3,
+ CUSTOM_EVENT = 4,
};
// The table will contain these structs that point to the sled, the function
@@ -242,7 +243,7 @@ public:
};
// All the sleds to be emitted.
- std::vector<XRayFunctionEntry> Sleds;
+ SmallVector<XRayFunctionEntry, 4> Sleds;
// Helper function to record a given XRay sled.
void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind);
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 2abe3bb11556..57fa0c73d272 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -506,6 +506,7 @@ protected:
bool selectCast(const User *I, unsigned Opcode);
bool selectExtractValue(const User *I);
bool selectInsertValue(const User *I);
+ bool selectXRayCustomEvent(const CallInst *II);
private:
/// \brief Handle PHI nodes in successor blocks.
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index 14ee5019ef2f..e7544bd7b70c 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -249,7 +249,7 @@ public:
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
const KnownBits &Known) {
// Only install this information if it tells us something.
- if (NumSignBits == 1 && Known.Zero == 0 && Known.One == 0)
+ if (NumSignBits == 1 && Known.isUnknown())
return;
LiveOutRegInfo.grow(Reg);
diff --git a/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 31ffdc0e2e78..e292e8913db0 100644
--- a/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -78,7 +78,7 @@ private:
/// this function.
DenseMap<const AllocaInst *, int> FrameIndices;
- /// Methods for translating form LLVM IR to MachineInstr.
+ /// \name Methods for translating from LLVM IR to MachineInstr.
/// \see ::translate for general information on the translate methods.
/// @{
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 472f50576d96..6b662a7f7413 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -45,7 +45,7 @@ class MachineIRBuilder {
/// Debug location to be set to any instruction we create.
DebugLoc DL;
- /// Fields describing the insertion point.
+ /// \name Fields describing the insertion point.
/// @{
MachineBasicBlock *MBB;
MachineBasicBlock::iterator II;
@@ -84,7 +84,7 @@ public:
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
/// @}
- /// Setters for the insertion point.
+ /// \name Setters for the insertion point.
/// @{
/// Set the MachineFunction where to build instructions.
void setMF(MachineFunction &);
@@ -98,7 +98,7 @@ public:
void setInstr(MachineInstr &MI);
/// @}
- /// Control where instructions we create are recorded (typically for
+ /// \name Control where instructions we create are recorded (typically for
/// visiting again later during legalization).
/// @{
void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
diff --git a/include/llvm/CodeGen/GlobalISel/RegBankSelect.h b/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
index daa8dcf2061b..f610bc02b6f2 100644
--- a/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
+++ b/include/llvm/CodeGen/GlobalISel/RegBankSelect.h
@@ -309,7 +309,7 @@ public:
Impossible
};
- /// Convenient types for a list of insertion points.
+ /// \name Convenient types for a list of insertion points.
/// @{
typedef SmallVector<std::unique_ptr<InsertPoint>, 2> InsertionPoints;
typedef InsertionPoints::iterator insertpt_iterator;
@@ -341,7 +341,7 @@ public:
const TargetRegisterInfo &TRI, Pass &P,
RepairingKind Kind = RepairingKind::Insert);
- /// Getters.
+ /// \name Getters.
/// @{
RepairingKind getKind() const { return Kind; }
unsigned getOpIdx() const { return OpIdx; }
@@ -349,7 +349,7 @@ public:
bool hasSplit() { return HasSplit; }
/// @}
- /// Overloaded methods to add an insertion point.
+ /// \name Overloaded methods to add an insertion point.
/// @{
/// Add a MBBInsertionPoint to the list of InsertPoints.
void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
@@ -362,7 +362,7 @@ public:
void addInsertPoint(InsertPoint &Point);
/// @}
- /// Accessors related to the insertion points.
+ /// \name Accessors related to the insertion points.
/// @{
insertpt_iterator begin() { return InsertPoints.begin(); }
insertpt_iterator end() { return InsertPoints.end(); }
@@ -561,7 +561,7 @@ private:
/// Find the best mapping for \p MI from \p PossibleMappings.
/// \return a reference on the best mapping in \p PossibleMappings.
- RegisterBankInfo::InstructionMapping &
+ const RegisterBankInfo::InstructionMapping &
findBestMapping(MachineInstr &MI,
RegisterBankInfo::InstructionMappings &PossibleMappings,
SmallVectorImpl<RepairingPlacement> &RepairPts);
diff --git a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 600733ac6a2d..f32233b3a9e4 100644
--- a/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -264,7 +264,7 @@ public:
/// Convenient type to represent the alternatives for mapping an
/// instruction.
/// \todo When we move to TableGen this should be an array ref.
- typedef SmallVector<InstructionMapping, 4> InstructionMappings;
+ typedef SmallVector<const InstructionMapping *, 4> InstructionMappings;
/// Helper class used to get/create the virtual registers that will be used
/// to replace the MachineOperand when applying a mapping.
@@ -310,7 +310,7 @@ public:
OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
MachineRegisterInfo &MRI);
- /// Getters.
+ /// \name Getters.
/// @{
/// The MachineInstr being remapped.
MachineInstr &getMI() const { return MI; }
@@ -378,15 +378,23 @@ protected:
/// Keep dynamically allocated PartialMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
- mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>> MapOfPartialMappings;
+ mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
+ MapOfPartialMappings;
/// Keep dynamically allocated ValueMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
- mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping> > MapOfValueMappings;
+ mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
+ MapOfValueMappings;
/// Keep dynamically allocated array of ValueMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
- mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>> MapOfOperandsMappings;
+ mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
+ MapOfOperandsMappings;
+
+ /// Keep dynamically allocated InstructionMapping in a separate map.
+ /// This shouldn't be needed when everything gets TableGen'ed.
+ mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
+ MapOfInstructionMappings;
/// Create a RegisterBankInfo that can accomodate up to \p NumRegBanks
/// RegisterBank instances.
@@ -425,14 +433,14 @@ protected:
/// register, a register class, or a register bank.
/// In other words, this method will likely fail to find a mapping for
/// any generic opcode that has not been lowered by target specific code.
- InstructionMapping getInstrMappingImpl(const MachineInstr &MI) const;
+ const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;
/// Get the uniquely generated PartialMapping for the
/// given arguments.
const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
const RegisterBank &RegBank) const;
- /// Methods to get a uniquely generated ValueMapping.
+ /// \name Methods to get a uniquely generated ValueMapping.
/// @{
/// The most common ValueMapping consists of a single PartialMapping.
@@ -445,7 +453,7 @@ protected:
unsigned NumBreakDowns) const;
/// @}
- /// Methods to get a uniquely generated array of ValueMapping.
+ /// \name Methods to get a uniquely generated array of ValueMapping.
/// @{
/// Get the uniquely generated array of ValueMapping for the
@@ -478,6 +486,33 @@ protected:
std::initializer_list<const ValueMapping *> OpdsMapping) const;
/// @}
+ /// \name Methods to get a uniquely generated InstructionMapping.
+ /// @{
+
+private:
+ /// Method to get a uniquely generated InstructionMapping.
+ const InstructionMapping &
+ getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
+ unsigned Cost = 0,
+ const ValueMapping *OperandsMapping = nullptr,
+ unsigned NumOperands = 0) const;
+
+public:
+ /// Method to get a uniquely generated InstructionMapping.
+ const InstructionMapping &
+ getInstructionMapping(unsigned ID, unsigned Cost,
+ const ValueMapping *OperandsMapping,
+ unsigned NumOperands) const {
+ return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
+ OperandsMapping, NumOperands);
+ }
+
+ /// Method to get a uniquely generated invalid InstructionMapping.
+ const InstructionMapping &getInvalidInstructionMapping() const {
+ return getInstructionMappingImpl(/*IsInvalid*/ true);
+ }
+ /// @}
+
/// Get the register bank for the \p OpIdx-th operand of \p MI form
/// the encoding constraints, if any.
///
@@ -603,7 +638,8 @@ public:
///
/// \note If returnedVal does not verify MI, this would probably mean
/// that the target does not support that instruction.
- virtual InstructionMapping getInstrMapping(const MachineInstr &MI) const;
+ virtual const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const;
/// Get the alternative mappings for \p MI.
/// Alternative in the sense different from getInstrMapping.
diff --git a/lib/CodeGen/MIRPrinter.h b/include/llvm/CodeGen/MIRPrinter.h
index 16aa9038b6b2..c73adc3f2b11 100644
--- a/lib/CodeGen/MIRPrinter.h
+++ b/include/llvm/CodeGen/MIRPrinter.h
@@ -17,9 +17,11 @@
namespace llvm {
+class MachineBasicBlock;
class MachineFunction;
class Module;
class raw_ostream;
+template <typename T> class SmallVectorImpl;
/// Print LLVM IR using the MIR serialization format to the given output stream.
void printMIR(raw_ostream &OS, const Module &M);
@@ -28,6 +30,17 @@ void printMIR(raw_ostream &OS, const Module &M);
/// output stream.
void printMIR(raw_ostream &OS, const MachineFunction &MF);
+/// Determine a possible list of successors of a basic block based on the
+/// basic block machine operand being used inside the block. This should give
+/// you the correct list of successor blocks in most cases except for things
+/// like jump tables where the basic block references can't easily be found.
+/// The MIRPrinter will skip printing successors if they match the result of
+/// this function and the parser will use this function to construct a list if
+/// it is missing.
+void guessSuccessors(const MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineBasicBlock*> &Successors,
+ bool &IsFallthrough);
+
} // end namespace llvm
#endif
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
index 61be9f775c97..689f3cd9fd12 100644
--- a/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -520,6 +520,14 @@ public:
bool hasTailCall() const { return HasTailCall; }
void setHasTailCall() { HasTailCall = true; }
+ /// Computes the maximum size of a callframe and the AdjustsStack property.
+ /// This only works for targets defining
+ /// TargetInstrInfo::getCallFrameSetupOpcode(), getCallFrameDestroyOpcode(),
+ /// and getFrameSize().
+ /// This is usually computed by the prologue epilogue inserter but some
+ /// targets may call this to compute it earlier.
+ void computeMaxCallFrameSize(const MachineFunction &MF);
+
/// Return the maximum size of a call frame that must be
/// allocated for an outgoing function call. This is only available if
/// CallFrameSetup/Destroy pseudo instructions are used by the target, and
diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h
index 182d23ef3c90..f46ef41879d1 100644
--- a/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/include/llvm/CodeGen/MachineModuleInfo.h
@@ -116,7 +116,7 @@ class MachineModuleInfo : public ImmutablePass {
// TODO: Ideally, what we'd like is to have a switch that allows emitting
// synchronous (precise at call-sites only) CFA into .eh_frame. However,
- // even under this switch, we'd like .debug_frame to be precise when using.
+ // even under this switch, we'd like .debug_frame to be precise when using
// -g. At this moment, there's no way to specify that some CFI directives
// go into .eh_frame only, while others go into .debug_frame only.
diff --git a/include/llvm/DebugInfo/CodeView/TypeDatabase.h b/include/llvm/DebugInfo/CodeView/TypeDatabase.h
index 220de4bf0ee4..be7b19e7df0c 100644
--- a/include/llvm/DebugInfo/CodeView/TypeDatabase.h
+++ b/include/llvm/DebugInfo/CodeView/TypeDatabase.h
@@ -21,7 +21,7 @@ namespace llvm {
namespace codeview {
class TypeDatabase {
public:
- TypeDatabase() : TypeNameStorage(Allocator) {}
+ explicit TypeDatabase(uint32_t ExpectedSize);
/// Gets the type index for the next type record.
TypeIndex getNextTypeIndex() const;
diff --git a/include/llvm/DebugInfo/DWARF/DWARFContext.h b/include/llvm/DebugInfo/DWARF/DWARFContext.h
index b9f3425d5deb..3fae8b441439 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -310,6 +310,11 @@ class DWARFContextInMemory : public DWARFContext {
StringRef *MapSectionToMember(StringRef Name);
+ /// If Sec is a compressed section, decompresses and updates its contents
+ /// provided by Data. Otherwise leaves it unchanged.
+ Error maybeDecompress(const object::SectionRef &Sec, StringRef Name,
+ StringRef &Data);
+
public:
DWARFContextInMemory(const object::ObjectFile &Obj,
const LoadedObjectInfo *L = nullptr);
diff --git a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index 36b27228f5c6..f3516ebdecba 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -39,20 +39,18 @@ public:
private:
struct ValueType {
- ValueType() {
- uval = 0;
- }
+ ValueType() { uval = 0; }
union {
uint64_t uval;
int64_t sval;
- const char* cstr;
+ const char *cstr;
};
- const uint8_t* data = nullptr;
+ const uint8_t *data = nullptr;
};
- dwarf::Form Form; // Form for this value.
- ValueType Value; // Contains all data for the form.
+ dwarf::Form Form; // Form for this value.
+ ValueType Value; // Contains all data for the form.
const DWARFUnit *U = nullptr; // Remember the DWARFUnit at extract time.
public:
@@ -84,7 +82,7 @@ public:
const DWARFUnit *U);
bool isInlinedCStr() const {
- return Value.data != nullptr && Value.data == (const uint8_t*)Value.cstr;
+ return Value.data != nullptr && Value.data == (const uint8_t *)Value.cstr;
}
/// getAsFoo functions below return the extracted value as Foo if only
@@ -135,45 +133,45 @@ public:
uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format);
- /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ /// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
- /// \param debug_info_data the .debug_info data to use to skip the value.
- /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param DebugInfoData the .debug_info data to use to skip the value.
+ /// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \returns true on success, false if the form was not skipped.
- bool skipValue(DataExtractor debug_info_data, uint32_t *offset_ptr,
+ bool skipValue(DataExtractor DebugInfoData, uint32_t *OffsetPtr,
const DWARFUnit *U) const;
- /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ /// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
- /// \param form the DW_FORM enumeration that indicates the form to skip.
- /// \param debug_info_data the .debug_info data to use to skip the value.
- /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param Form the DW_FORM enumeration that indicates the form to skip.
+ /// \param DebugInfoData the .debug_info data to use to skip the value.
+ /// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \returns true on success, false if the form was not skipped.
- static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
- uint32_t *offset_ptr, const DWARFUnit *U);
+ static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, const DWARFUnit *U);
- /// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
+ /// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
- /// \param form the DW_FORM enumeration that indicates the form to skip.
- /// \param debug_info_data the .debug_info data to use to skip the value.
- /// \param offset_ptr a reference to the offset that will be updated.
+ /// \param Form the DW_FORM enumeration that indicates the form to skip.
+ /// \param DebugInfoData the .debug_info data to use to skip the value.
+ /// \param OffsetPtr a reference to the offset that will be updated.
/// \param Version DWARF version number.
/// \param AddrSize size of an address in bytes.
/// \param Format enum value from llvm::dwarf::DwarfFormat.
/// \returns true on success, false if the form was not skipped.
- static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
- uint32_t *offset_ptr, uint16_t Version,
- uint8_t AddrSize, llvm::dwarf::DwarfFormat Format);
+ static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, uint16_t Version, uint8_t AddrSize,
+ llvm::dwarf::DwarfFormat Format);
private:
void dumpString(raw_ostream &OS) const;
@@ -181,149 +179,146 @@ private:
namespace dwarf {
- /// Take an optional DWARFFormValue and try to extract a string value from it.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and was a string.
- inline Optional<const char*> toString(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsCString();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a string value from it.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the string value or Default if the V doesn't have a value or the
- /// form value's encoding wasn't a string.
- inline const char*
- toString(const Optional<DWARFFormValue>& V, const char *Default) {
- return toString(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an unsigned constant.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a unsigned constant form.
- inline Optional<uint64_t> toUnsigned(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsUnsignedConstant();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a unsigned constant.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted unsigned value or Default if the V doesn't have a
- /// value or the form value's encoding wasn't an unsigned constant form.
- inline uint64_t
- toUnsigned(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toUnsigned(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an reference.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a reference form.
- inline Optional<uint64_t> toReference(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsReference();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a reference.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted reference value or Default if the V doesn't have a
- /// value or the form value's encoding wasn't a reference form.
- inline uint64_t
- toReference(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toReference(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an signed constant.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a signed constant form.
- inline Optional<int64_t> toSigned(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsSignedConstant();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a signed integer.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted signed integer value or Default if the V doesn't
- /// have a value or the form value's encoding wasn't a signed integer form.
- inline int64_t
- toSigned(const Optional<DWARFFormValue>& V, int64_t Default) {
- return toSigned(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an address.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a address form.
- inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsAddress();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a address.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted address value or Default if the V doesn't have a
- /// value or the form value's encoding wasn't an address form.
- inline uint64_t
- toAddress(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toAddress(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract an section offset.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a section offset form.
- inline Optional<uint64_t> toSectionOffset(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsSectionOffset();
- return None;
- }
-
- /// Take an optional DWARFFormValue and extract a section offset.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \param Default the default value to return in case of failure.
- /// \returns the extracted section offset value or Default if the V doesn't
- /// have a value or the form value's encoding wasn't a section offset form.
- inline uint64_t
- toSectionOffset(const Optional<DWARFFormValue>& V, uint64_t Default) {
- return toSectionOffset(V).getValueOr(Default);
- }
-
- /// Take an optional DWARFFormValue and try to extract block data.
- ///
- /// \param V and optional DWARFFormValue to attempt to extract the value from.
- /// \returns an optional value that contains a value if the form value
- /// was valid and has a block form.
- inline Optional<ArrayRef<uint8_t>>
- toBlock(const Optional<DWARFFormValue>& V) {
- if (V)
- return V->getAsBlock();
- return None;
- }
+/// Take an optional DWARFFormValue and try to extract a string value from it.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and was a string.
+inline Optional<const char *> toString(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsCString();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a string value from it.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the string value or Default if the V doesn't have a value or the
+/// form value's encoding wasn't a string.
+inline const char *toString(const Optional<DWARFFormValue> &V,
+ const char *Default) {
+ return toString(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an unsigned constant.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has an unsigned constant form.
+inline Optional<uint64_t> toUnsigned(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsUnsignedConstant();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract an unsigned constant.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted unsigned value or Default if the V doesn't have a
+/// value or the form value's encoding wasn't an unsigned constant form.
+inline uint64_t toUnsigned(const Optional<DWARFFormValue> &V,
+ uint64_t Default) {
+ return toUnsigned(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract a reference.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a reference form.
+inline Optional<uint64_t> toReference(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsReference();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a reference.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted reference value or Default if the V doesn't have a
+/// value or the form value's encoding wasn't a reference form.
+inline uint64_t toReference(const Optional<DWARFFormValue> &V,
+ uint64_t Default) {
+ return toReference(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract a signed constant.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a signed constant form.
+inline Optional<int64_t> toSigned(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsSignedConstant();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a signed integer.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted signed integer value or Default if the V doesn't
+/// have a value or the form value's encoding wasn't a signed integer form.
+inline int64_t toSigned(const Optional<DWARFFormValue> &V, int64_t Default) {
+ return toSigned(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract an address.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has an address form.
+inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsAddress();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract an address.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted address value or Default if the V doesn't have a
+/// value or the form value's encoding wasn't an address form.
+inline uint64_t toAddress(const Optional<DWARFFormValue> &V, uint64_t Default) {
+ return toAddress(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract a section offset.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a section offset form.
+inline Optional<uint64_t> toSectionOffset(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsSectionOffset();
+ return None;
+}
+
+/// Take an optional DWARFFormValue and extract a section offset.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \param Default the default value to return in case of failure.
+/// \returns the extracted section offset value or Default if the V doesn't
+/// have a value or the form value's encoding wasn't a section offset form.
+inline uint64_t toSectionOffset(const Optional<DWARFFormValue> &V,
+ uint64_t Default) {
+ return toSectionOffset(V).getValueOr(Default);
+}
+
+/// Take an optional DWARFFormValue and try to extract block data.
+///
+/// \param V an optional DWARFFormValue to attempt to extract the value from.
+/// \returns an optional value that contains a value if the form value
+/// was valid and has a block form.
+inline Optional<ArrayRef<uint8_t>> toBlock(const Optional<DWARFFormValue> &V) {
+ if (V)
+ return V->getAsBlock();
+ return None;
+}
} // end namespace dwarf
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
index d1f791b9daed..7e77f5a3eef9 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
@@ -53,14 +53,6 @@ private:
const ModuleInfoHeader *Layout = nullptr;
};
-struct ModuleInfoEx {
- ModuleInfoEx(const DbiModuleDescriptor &Info) : Info(Info) {}
- ModuleInfoEx(const ModuleInfoEx &Ex) = default;
-
- DbiModuleDescriptor Info;
- std::vector<StringRef> SourceFiles;
-};
-
} // end namespace pdb
template <> struct VarStreamArrayExtractor<pdb::DbiModuleDescriptor> {
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
new file mode 100644
index 000000000000..bcf1cff8f6e5
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
@@ -0,0 +1,114 @@
+//===- DbiModuleList.h - PDB module information list ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H
+#define LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <vector>
+
+namespace llvm {
+namespace pdb {
+
+class DbiModuleList;
+struct FileInfoSubstreamHeader;
+
+class DbiModuleSourceFilesIterator
+ : public iterator_facade_base<DbiModuleSourceFilesIterator,
+ std::random_access_iterator_tag, StringRef> {
+ typedef iterator_facade_base<DbiModuleSourceFilesIterator,
+ std::random_access_iterator_tag, StringRef>
+ BaseType;
+
+public:
+ DbiModuleSourceFilesIterator(const DbiModuleList &Modules, uint32_t Modi,
+ uint16_t Filei);
+ DbiModuleSourceFilesIterator() = default;
+ DbiModuleSourceFilesIterator &
+ operator=(const DbiModuleSourceFilesIterator &R) = default;
+
+ bool operator==(const DbiModuleSourceFilesIterator &R) const;
+
+ const StringRef &operator*() const { return ThisValue; }
+ StringRef &operator*() { return ThisValue; }
+
+ bool operator<(const DbiModuleSourceFilesIterator &RHS) const;
+ std::ptrdiff_t operator-(const DbiModuleSourceFilesIterator &R) const;
+ DbiModuleSourceFilesIterator &operator+=(std::ptrdiff_t N);
+ DbiModuleSourceFilesIterator &operator-=(std::ptrdiff_t N);
+
+private:
+ void setValue();
+
+ bool isEnd() const;
+ bool isCompatible(const DbiModuleSourceFilesIterator &R) const;
+ bool isUniversalEnd() const;
+
+ StringRef ThisValue;
+ const DbiModuleList *Modules{nullptr};
+ uint32_t Modi{0};
+ uint16_t Filei{0};
+};
+
+class DbiModuleList {
+ friend DbiModuleSourceFilesIterator;
+
+public:
+ Error initialize(BinaryStreamRef ModInfo, BinaryStreamRef FileInfo);
+
+ Expected<StringRef> getFileName(uint32_t Index) const;
+ uint32_t getModuleCount() const;
+ uint32_t getSourceFileCount() const;
+ uint16_t getSourceFileCount(uint32_t Modi) const;
+
+ iterator_range<DbiModuleSourceFilesIterator>
+ source_files(uint32_t Modi) const;
+
+ DbiModuleDescriptor getModuleDescriptor(uint32_t Modi) const;
+
+private:
+ Error initializeModInfo(BinaryStreamRef ModInfo);
+ Error initializeFileInfo(BinaryStreamRef FileInfo);
+
+ VarStreamArray<DbiModuleDescriptor> Descriptors;
+
+ FixedStreamArray<support::little32_t> FileNameOffsets;
+ FixedStreamArray<support::ulittle16_t> ModFileCountArray;
+
+ // For each module, there are multiple filenames, which can be obtained by
+ // knowing the index of the file. Given the index of the file, one can use
+ // that as an offset into the FileNameOffsets array, which contains the
+ // absolute offset of the file name in NamesBuffer. Thus, for each module
+ // we store the first index in the FileNameOffsets array for this module.
+ // The number of files for the corresponding module is stored in
+ // ModFileCountArray.
+ std::vector<uint32_t> ModuleInitialFileIndex;
+
+ // In order to provide random access into the Descriptors array, we iterate it
+ // once up front to find the offsets of the individual items and store them in
+ // this array.
+ std::vector<uint32_t> ModuleDescriptorOffsets;
+
+ const FileInfoSubstreamHeader *FileInfoHeader = nullptr;
+
+ BinaryStreamRef ModInfoSubstream;
+ BinaryStreamRef FileInfoSubstream;
+ BinaryStreamRef NamesBuffer;
+};
+}
+}
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H \ No newline at end of file
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStream.h b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
index 08262e47f77f..8f95481f4152 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
@@ -13,6 +13,7 @@
#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTable.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
@@ -68,9 +69,7 @@ public:
/// not present, returns InvalidStreamIndex.
uint32_t getDebugStreamIndex(DbgHeaderType Type) const;
- ArrayRef<ModuleInfoEx> modules() const;
-
- Expected<StringRef> getFileNameForIndex(uint32_t Index) const;
+ const DbiModuleList &modules() const;
FixedStreamArray<object::coff_section> getSectionHeaders();
@@ -80,27 +79,22 @@ public:
void visitSectionContributions(ISectionContribVisitor &Visitor) const;
private:
- Error initializeModInfoArray();
Error initializeSectionContributionData();
Error initializeSectionHeadersData();
Error initializeSectionMapData();
- Error initializeFileInfo();
Error initializeFpoRecords();
PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;
- std::vector<ModuleInfoEx> ModuleInfos;
PDBStringTable ECNames;
- BinaryStreamRef ModInfoSubstream;
BinaryStreamRef SecContrSubstream;
BinaryStreamRef SecMapSubstream;
- BinaryStreamRef FileInfoSubstream;
BinaryStreamRef TypeServerMapSubstream;
BinaryStreamRef ECSubstream;
- BinaryStreamRef NamesBuffer;
+ DbiModuleList Modules;
FixedStreamArray<support::ulittle16_t> DbgStreams;
@@ -108,7 +102,6 @@ private:
FixedStreamArray<SectionContrib> SectionContribs;
FixedStreamArray<SectionContrib2> SectionContribs2;
FixedStreamArray<SecMapEntry> SectionMap;
- FixedStreamArray<support::little32_t> FileNameOffsets;
std::unique_ptr<msf::MappedBlockStream> SectionHeaderStream;
FixedStreamArray<object::coff_section> SectionHeaders;
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
index b1d980679a45..22ed61910d94 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
@@ -18,7 +18,7 @@ namespace pdb {
class NativeCompilandSymbol : public NativeRawSymbol {
public:
- NativeCompilandSymbol(NativeSession &Session, const ModuleInfoEx &MI);
+ NativeCompilandSymbol(NativeSession &Session, DbiModuleDescriptor MI);
PDB_SymType getSymTag() const override;
bool isEditAndContinueEnabled() const override;
uint32_t getLexicalParentId() const override;
@@ -26,7 +26,7 @@ public:
std::string getName() const override;
private:
- ModuleInfoEx Module;
+ DbiModuleDescriptor Module;
};
} // namespace pdb
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h b/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
index 18022f599bba..6aa1460dbb4e 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
@@ -16,13 +16,13 @@
namespace llvm {
namespace pdb {
+class DbiModuleList;
class NativeSession;
class NativeEnumModules : public IPDBEnumChildren<PDBSymbol> {
public:
- explicit NativeEnumModules(NativeSession &Session,
- ArrayRef<ModuleInfoEx> Modules,
- uint32_t Index = 0);
+ NativeEnumModules(NativeSession &Session, const DbiModuleList &Modules,
+ uint32_t Index = 0);
uint32_t getChildCount() const override;
std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
@@ -32,7 +32,7 @@ public:
private:
NativeSession &Session;
- ArrayRef<ModuleInfoEx> Modules;
+ const DbiModuleList &Modules;
uint32_t Index;
};
}
diff --git a/include/llvm/DebugInfo/PDB/Native/RawTypes.h b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
index 93622d0a4394..979b8454dd5e 100644
--- a/include/llvm/DebugInfo/PDB/Native/RawTypes.h
+++ b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
@@ -211,7 +211,7 @@ struct ModInfoFlags {
};
/// The header preceeding each entry in the Module Info substream of the DBI
-/// stream.
+/// stream. Corresponds to the type MODI in the reference implementation.
struct ModuleInfoHeader {
/// Currently opened module. This field is a pointer in the reference
/// implementation, but that won't work on 64-bit systems, and anyway it
@@ -243,9 +243,12 @@ struct ModuleInfoHeader {
/// Padding so the next field is 4-byte aligned.
char Padding1[2];
- /// Array of [0..NumFiles) DBI name buffer offsets. This field is a pointer
- /// in the reference implementation, but as with `Mod`, we ignore it for now
- /// since it is unused.
+ /// Array of [0..NumFiles) DBI name buffer offsets. In the reference
+ /// implementation this field is a pointer. But since you can't portably
+ /// serialize a pointer, on 64-bit platforms they copy all the values except
+ /// this one into the 32-bit version of the struct and use that for
+ /// serialization. Regardless, this field is unused, it is only there to
+ /// store a pointer that can be accessed at runtime.
support::ulittle32_t FileNameOffs;
/// Name Index for src file name
diff --git a/include/llvm/DebugInfo/PDB/Native/TpiStream.h b/include/llvm/DebugInfo/PDB/Native/TpiStream.h
index 62dde0ef08b7..9fef9bee5e1a 100644
--- a/include/llvm/DebugInfo/PDB/Native/TpiStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/TpiStream.h
@@ -40,12 +40,12 @@ public:
uint32_t TypeIndexBegin() const;
uint32_t TypeIndexEnd() const;
- uint32_t NumTypeRecords() const;
+ uint32_t getNumTypeRecords() const;
uint16_t getTypeHashStreamIndex() const;
uint16_t getTypeHashStreamAuxIndex() const;
uint32_t getHashKeySize() const;
- uint32_t NumHashBuckets() const;
+ uint32_t getNumHashBuckets() const;
FixedStreamArray<support::ulittle32_t> getHashValues() const;
FixedStreamArray<TypeIndexOffset> getTypeIndexOffsets() const;
HashTable &getHashAdjusters();
@@ -55,8 +55,6 @@ public:
Error commit();
private:
- Error verifyHashValues();
-
const PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;
diff --git a/include/llvm/ExecutionEngine/Orc/RPCSerialization.h b/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
index a3be242b4457..1cb2448a3a44 100644
--- a/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
+++ b/include/llvm/ExecutionEngine/Orc/RPCSerialization.h
@@ -355,7 +355,7 @@ public:
std::move(Deserialize)));
KeyName = &I->first;
}
-
+
{
assert(KeyName != nullptr && "No keyname pointer");
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
@@ -370,7 +370,7 @@ public:
};
}
}
-
+
static Error serialize(ChannelT &C, Error &&Err) {
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
diff --git a/include/llvm/ExecutionEngine/RuntimeDyldChecker.h b/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
index f5f52b5d2f92..de89f405af4c 100644
--- a/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
+++ b/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
@@ -10,6 +10,8 @@
#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
#define LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+#include "llvm/ADT/Optional.h"
+
#include <cstdint>
#include <memory>
#include <string>
@@ -97,6 +99,10 @@ public:
StringRef SectionName,
bool LocalAddress);
+ /// \brief If there is a section at the given local address, return its load
+ /// address, otherwise return none.
+ Optional<uint64_t> getSectionLoadAddress(void *LocalAddress) const;
+
private:
std::unique_ptr<RuntimeDyldCheckerImpl> Impl;
};
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index adcb7266073b..cbe681684a5c 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -244,7 +244,8 @@ public:
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
std::string getAsString(bool InAttrGrp = false) const;
- typedef const Attribute *iterator;
+ using iterator = const Attribute *;
+
iterator begin() const;
iterator end() const;
};
@@ -479,7 +480,7 @@ public:
/// \brief Return the attributes at the index as a string.
std::string getAsString(unsigned Index, bool InAttrGrp = false) const;
- typedef ArrayRef<Attribute>::iterator iterator;
+ using iterator = ArrayRef<Attribute>::iterator;
iterator begin(unsigned Slot) const;
iterator end(unsigned Slot) const;
@@ -662,11 +663,11 @@ public:
bool empty() const { return Attrs.none(); }
// Iterators for target-dependent attributes.
- typedef std::pair<std::string, std::string> td_type;
- typedef std::map<std::string, std::string>::iterator td_iterator;
- typedef std::map<std::string, std::string>::const_iterator td_const_iterator;
- typedef iterator_range<td_iterator> td_range;
- typedef iterator_range<td_const_iterator> td_const_range;
+ using td_type = std::pair<std::string, std::string>;
+ using td_iterator = std::map<std::string, std::string>::iterator;
+ using td_const_iterator = std::map<std::string, std::string>::const_iterator;
+ using td_range = iterator_range<td_iterator>;
+ using td_const_range = iterator_range<td_const_iterator>;
td_iterator td_begin() { return TargetDepAttrs.begin(); }
td_iterator td_end() { return TargetDepAttrs.end(); }
diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h
index bd210e1abf31..97989cf5c652 100644
--- a/include/llvm/IR/BasicBlock.h
+++ b/include/llvm/IR/BasicBlock.h
@@ -21,6 +21,7 @@
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Compiler.h"
#include "llvm-c/Types.h"
#include <cassert>
#include <cstddef>
@@ -31,7 +32,9 @@ class CallInst;
class Function;
class LandingPadInst;
class LLVMContext;
+class Module;
class TerminatorInst;
+class ValueSymbolTable;
/// \brief LLVM Basic Block Representation
///
@@ -51,7 +54,7 @@ class TerminatorInst;
class BasicBlock : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
public:
- typedef SymbolTableList<Instruction> InstListType;
+ using InstListType = SymbolTableList<Instruction>;
private:
friend class BlockAddress;
@@ -80,10 +83,10 @@ public:
LLVMContext &getContext() const;
/// Instruction iterators...
- typedef InstListType::iterator iterator;
- typedef InstListType::const_iterator const_iterator;
- typedef InstListType::reverse_iterator reverse_iterator;
- typedef InstListType::const_reverse_iterator const_reverse_iterator;
+ using iterator = InstListType::iterator;
+ using const_iterator = InstListType::const_iterator;
+ using reverse_iterator = InstListType::reverse_iterator;
+ using const_reverse_iterator = InstListType::const_reverse_iterator;
/// \brief Creates a new BasicBlock.
///
diff --git a/include/llvm/IR/CFG.h b/include/llvm/IR/CFG.h
index 52de11a06baf..e259e42e1ce4 100644
--- a/include/llvm/IR/CFG.h
+++ b/include/llvm/IR/CFG.h
@@ -37,9 +37,9 @@ namespace llvm {
template <class Ptr, class USE_iterator> // Predecessor Iterator
class PredIterator : public std::iterator<std::forward_iterator_tag,
Ptr, ptrdiff_t, Ptr*, Ptr*> {
- typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*,
- Ptr*> super;
- typedef PredIterator<Ptr, USE_iterator> Self;
+ using super =
+ std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*, Ptr*>;
+ using Self = PredIterator<Ptr, USE_iterator>;
USE_iterator It;
inline void advancePastNonTerminators() {
@@ -49,8 +49,8 @@ class PredIterator : public std::iterator<std::forward_iterator_tag,
}
public:
- typedef typename super::pointer pointer;
- typedef typename super::reference reference;
+ using pointer = typename super::pointer;
+ using reference = typename super::reference;
PredIterator() = default;
explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
@@ -90,11 +90,11 @@ public:
}
};
-typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
-typedef PredIterator<const BasicBlock,
- Value::const_user_iterator> const_pred_iterator;
-typedef iterator_range<pred_iterator> pred_range;
-typedef iterator_range<const_pred_iterator> pred_const_range;
+using pred_iterator = PredIterator<BasicBlock, Value::user_iterator>;
+using const_pred_iterator =
+ PredIterator<const BasicBlock, Value::const_user_iterator>;
+using pred_range = iterator_range<pred_iterator>;
+using pred_const_range = iterator_range<const_pred_iterator>;
inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
@@ -118,12 +118,12 @@ inline pred_const_range predecessors(const BasicBlock *BB) {
// BasicBlock succ_iterator helpers
//===----------------------------------------------------------------------===//
-typedef TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>
- succ_iterator;
-typedef TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>
- succ_const_iterator;
-typedef iterator_range<succ_iterator> succ_range;
-typedef iterator_range<succ_const_iterator> succ_const_range;
+using succ_iterator =
+ TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>;
+using succ_const_iterator =
+ TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>;
+using succ_range = iterator_range<succ_iterator>;
+using succ_const_range = iterator_range<succ_const_iterator>;
inline succ_iterator succ_begin(BasicBlock *BB) {
return succ_iterator(BB->getTerminator());
@@ -160,8 +160,8 @@ struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
// graph of basic blocks...
template <> struct GraphTraits<BasicBlock*> {
- typedef BasicBlock *NodeRef;
- typedef succ_iterator ChildIteratorType;
+ using NodeRef = BasicBlock *;
+ using ChildIteratorType = succ_iterator;
static NodeRef getEntryNode(BasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
@@ -169,8 +169,8 @@ template <> struct GraphTraits<BasicBlock*> {
};
template <> struct GraphTraits<const BasicBlock*> {
- typedef const BasicBlock *NodeRef;
- typedef succ_const_iterator ChildIteratorType;
+ using NodeRef = const BasicBlock *;
+ using ChildIteratorType = succ_const_iterator;
static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }
@@ -184,16 +184,18 @@ template <> struct GraphTraits<const BasicBlock*> {
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<BasicBlock*>> {
- typedef BasicBlock *NodeRef;
- typedef pred_iterator ChildIteratorType;
+ using NodeRef = BasicBlock *;
+ using ChildIteratorType = pred_iterator;
+
static NodeRef getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
template <> struct GraphTraits<Inverse<const BasicBlock*>> {
- typedef const BasicBlock *NodeRef;
- typedef const_pred_iterator ChildIteratorType;
+ using NodeRef = const BasicBlock *;
+ using ChildIteratorType = const_pred_iterator;
+
static NodeRef getEntryNode(Inverse<const BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
@@ -211,7 +213,7 @@ template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
static NodeRef getEntryNode(Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef pointer_iterator<Function::iterator> nodes_iterator;
+ using nodes_iterator = pointer_iterator<Function::iterator>;
static nodes_iterator nodes_begin(Function *F) {
return nodes_iterator(F->begin());
@@ -228,7 +230,7 @@ template <> struct GraphTraits<const Function*> :
static NodeRef getEntryNode(const Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
- typedef pointer_iterator<Function::const_iterator> nodes_iterator;
+ using nodes_iterator = pointer_iterator<Function::const_iterator>;
static nodes_iterator nodes_begin(const Function *F) {
return nodes_iterator(F->begin());
diff --git a/include/llvm/IR/CallSite.h b/include/llvm/IR/CallSite.h
index d61431a51a97..4a806ab501e5 100644
--- a/include/llvm/IR/CallSite.h
+++ b/include/llvm/IR/CallSite.h
@@ -207,7 +207,7 @@ public:
/// The type of iterator to use when looping over actual arguments at this
/// call site.
- typedef IterTy arg_iterator;
+ using arg_iterator = IterTy;
iterator_range<IterTy> args() const {
return make_range(arg_begin(), arg_end());
@@ -231,7 +231,7 @@ public:
/// Type of iterator to use when looping over data operands at this call site
/// (see below).
- typedef IterTy data_operand_iterator;
+ using data_operand_iterator = IterTy;
/// data_operands_begin/data_operands_end - Return iterators iterating over
/// the call / invoke argument list and bundle operands. For invokes, this is
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h
index 604e99c8b52c..39fb3f1c791b 100644
--- a/include/llvm/IR/CallingConv.h
+++ b/include/llvm/IR/CallingConv.h
@@ -1,4 +1,4 @@
-//===-- llvm/CallingConv.h - LLVM Calling Conventions -----------*- C++ -*-===//
+//===- llvm/CallingConv.h - LLVM Calling Conventions ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,8 +20,9 @@ namespace llvm {
/// the well-known calling conventions.
///
namespace CallingConv {
+
/// LLVM IR allows to use arbitrary numbers as calling convention identifiers.
- typedef unsigned ID;
+ using ID = unsigned;
/// A set of enums which specify the assigned numeric values for known llvm
/// calling conventions.
@@ -203,8 +204,9 @@ namespace CallingConv {
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
-} // End CallingConv namespace
-} // End llvm namespace
+} // end namespace CallingConv
+
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_CALLINGCONV_H
diff --git a/include/llvm/IR/ConstantRange.h b/include/llvm/IR/ConstantRange.h
index fd7f96abb19e..6a50a8801f86 100644
--- a/include/llvm/IR/ConstantRange.h
+++ b/include/llvm/IR/ConstantRange.h
@@ -41,7 +41,7 @@ namespace llvm {
class MDNode;
/// This class represents a range of values.
-class ConstantRange {
+class LLVM_NODISCARD ConstantRange {
APInt Lower, Upper;
public:
@@ -167,7 +167,10 @@ public:
APInt getSetSize() const;
/// Compare set size of this range with the range CR.
- bool isSizeStrictlySmallerThanOf(const ConstantRange &CR) const;
+ bool isSizeStrictlySmallerThan(const ConstantRange &CR) const;
+
+ // Compare set size of this range with Value.
+ bool isSizeLargerThan(uint64_t MaxSize) const;
/// Return the largest unsigned value contained in the ConstantRange.
APInt getUnsignedMax() const;
diff --git a/include/llvm/IR/DataLayout.h b/include/llvm/IR/DataLayout.h
index 1930d48577d4..c1d398f17b59 100644
--- a/include/llvm/IR/DataLayout.h
+++ b/include/llvm/IR/DataLayout.h
@@ -1,4 +1,4 @@
-//===--------- llvm/DataLayout.h - Data size & alignment info ---*- C++ -*-===//
+//===- llvm/DataLayout.h - Data size & alignment info -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,27 +20,32 @@
#ifndef LLVM_IR_DATALAYOUT_H
#define LLVM_IR_DATALAYOUT_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
// This needs to be outside of the namespace, to avoid conflict with llvm-c
// decl.
-typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
+using LLVMTargetDataRef = struct LLVMOpaqueTargetData *;
namespace llvm {
-class Value;
-class StructType;
-class StructLayout;
-class Triple;
class GlobalVariable;
class LLVMContext;
-template<typename T>
-class ArrayRef;
+class Module;
+class StructLayout;
+class Triple;
+class Value;
/// Enum used to categorize the alignment types stored by LayoutAlignElem
enum AlignTypeEnum {
@@ -72,6 +77,7 @@ struct LayoutAlignElem {
static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width);
+
bool operator==(const LayoutAlignElem &rhs) const;
};
@@ -90,6 +96,7 @@ struct PointerAlignElem {
/// Initializer
static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
unsigned PrefAlign, uint32_t TypeByteWidth);
+
bool operator==(const PointerAlignElem &rhs) const;
};
@@ -121,7 +128,7 @@ private:
/// \brief Primitive type alignment data. This is sorted by type and bit
/// width during construction.
- typedef SmallVector<LayoutAlignElem, 16> AlignmentsTy;
+ using AlignmentsTy = SmallVector<LayoutAlignElem, 16>;
AlignmentsTy Alignments;
AlignmentsTy::const_iterator
@@ -136,7 +143,7 @@ private:
/// \brief The string representation used to create this DataLayout
std::string StringRepresentation;
- typedef SmallVector<PointerAlignElem, 8> PointersTy;
+ using PointersTy = SmallVector<PointerAlignElem, 8>;
PointersTy Pointers;
PointersTy::const_iterator
@@ -147,7 +154,7 @@ private:
PointersTy::iterator findPointerLowerBound(uint32_t AddressSpace);
// The StructType -> StructLayout map.
- mutable void *LayoutMap;
+ mutable void *LayoutMap = nullptr;
/// Pointers in these address spaces are non-integral, and don't have a
/// well-defined bitwise representation.
@@ -172,16 +179,16 @@ private:
public:
/// Constructs a DataLayout from a specification string. See reset().
- explicit DataLayout(StringRef LayoutDescription) : LayoutMap(nullptr) {
+ explicit DataLayout(StringRef LayoutDescription) {
reset(LayoutDescription);
}
/// Initialize target data from properties stored in the module.
explicit DataLayout(const Module *M);
- void init(const Module *M);
+ DataLayout(const DataLayout &DL) { *this = DL; }
- DataLayout(const DataLayout &DL) : LayoutMap(nullptr) { *this = DL; }
+ ~DataLayout(); // Not virtual, do not subclass this class
DataLayout &operator=(const DataLayout &DL) {
clear();
@@ -200,7 +207,7 @@ public:
bool operator==(const DataLayout &Other) const;
bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
- ~DataLayout(); // Not virtual, do not subclass this class
+ void init(const Module *M);
/// Parse a data layout string (with fallback to default values).
void reset(StringRef LayoutDescription);
@@ -489,6 +496,7 @@ class StructLayout {
unsigned IsPadded : 1;
unsigned NumElements : 31;
uint64_t MemberOffsets[1]; // variable sized array!
+
public:
uint64_t getSizeInBytes() const { return StructSize; }
@@ -515,6 +523,7 @@ public:
private:
friend class DataLayout; // Only DataLayout can create this class
+
StructLayout(StructType *ST, const DataLayout &DL);
};
@@ -560,6 +569,6 @@ inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
}
}
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_IR_DATALAYOUT_H
diff --git a/include/llvm/IR/DebugInfo.h b/include/llvm/IR/DebugInfo.h
index 04f46197b1c3..1d8e7e2855fd 100644
--- a/include/llvm/IR/DebugInfo.h
+++ b/include/llvm/IR/DebugInfo.h
@@ -21,17 +21,12 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/DebugInfoMetadata.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/ErrorHandling.h"
-#include <iterator>
namespace llvm {
-class Module;
+
class DbgDeclareInst;
class DbgValueInst;
-template <typename K, typename V, typename KeyInfoT, typename BucketT>
-class DenseMap;
+class Module;
/// \brief Find subprogram that is enclosing this scope.
DISubprogram *getDISubprogram(const MDNode *Scope);
@@ -95,13 +90,13 @@ private:
bool addScope(DIScope *Scope);
public:
- typedef SmallVectorImpl<DICompileUnit *>::const_iterator
- compile_unit_iterator;
- typedef SmallVectorImpl<DISubprogram *>::const_iterator subprogram_iterator;
- typedef SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator
- global_variable_expression_iterator;
- typedef SmallVectorImpl<DIType *>::const_iterator type_iterator;
- typedef SmallVectorImpl<DIScope *>::const_iterator scope_iterator;
+ using compile_unit_iterator =
+ SmallVectorImpl<DICompileUnit *>::const_iterator;
+ using subprogram_iterator = SmallVectorImpl<DISubprogram *>::const_iterator;
+ using global_variable_expression_iterator =
+ SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator;
+ using type_iterator = SmallVectorImpl<DIType *>::const_iterator;
+ using scope_iterator = SmallVectorImpl<DIScope *>::const_iterator;
iterator_range<compile_unit_iterator> compile_units() const {
return make_range(CUs.begin(), CUs.end());
@@ -140,4 +135,4 @@ private:
} // end namespace llvm
-#endif
+#endif // LLVM_IR_DEBUGINFO_H
diff --git a/include/llvm/IR/Dominators.h b/include/llvm/IR/Dominators.h
index 8f6c85f53efc..def91e73eb1d 100644
--- a/include/llvm/IR/Dominators.h
+++ b/include/llvm/IR/Dominators.h
@@ -42,7 +42,7 @@ extern template void Calculate<Function, Inverse<BasicBlock *>>(
DominatorTreeBaseByGraphTraits<GraphTraits<Inverse<BasicBlock *>>> &DT,
Function &F);
-typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
+using DomTreeNode = DomTreeNodeBase<BasicBlock>;
class BasicBlockEdge {
const BasicBlock *Start;
@@ -70,7 +70,7 @@ public:
};
template <> struct DenseMapInfo<BasicBlockEdge> {
- typedef DenseMapInfo<const BasicBlock *> BBInfo;
+ using BBInfo = DenseMapInfo<const BasicBlock *>;
static unsigned getHashValue(const BasicBlockEdge *V);
@@ -113,7 +113,7 @@ template <> struct DenseMapInfo<BasicBlockEdge> {
/// preceding statements; this is stated only to assist human understanding.
class DominatorTree : public DominatorTreeBase<BasicBlock> {
public:
- typedef DominatorTreeBase<BasicBlock> Base;
+ using Base = DominatorTreeBase<BasicBlock>;
DominatorTree() : DominatorTreeBase<BasicBlock>(false) {}
explicit DominatorTree(Function &F) : DominatorTreeBase<BasicBlock>(false) {
@@ -168,9 +168,9 @@ public:
// iterable by generic graph iterators.
template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
- typedef Node *NodeRef;
- typedef ChildIterator ChildIteratorType;
- typedef df_iterator<Node *, df_iterator_default_set<Node*>> nodes_iterator;
+ using NodeRef = Node *;
+ using ChildIteratorType = ChildIterator;
+ using nodes_iterator = df_iterator<Node *, df_iterator_default_set<Node*>>;
static NodeRef getEntryNode(NodeRef N) { return N; }
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
@@ -212,7 +212,7 @@ class DominatorTreeAnalysis : public AnalysisInfoMixin<DominatorTreeAnalysis> {
public:
/// \brief Provide the result typedef for this analysis pass.
- typedef DominatorTree Result;
+ using Result = DominatorTree;
/// \brief Run the analysis pass over a function and produce a dominator tree.
DominatorTree run(Function &F, FunctionAnalysisManager &);
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index f9582f51ca8d..c12a125b6352 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -466,7 +466,6 @@ public:
/// @brief Determine if the parameter or return value is marked with NoAlias
/// attribute.
- /// @param n The parameter to check. 1 is the first parameter, 0 is the return
bool returnDoesNotAlias() const {
return AttributeSets.hasAttribute(AttributeList::ReturnIndex,
Attribute::NoAlias);
diff --git a/include/llvm/IR/InlineAsm.h b/include/llvm/IR/InlineAsm.h
index 5d2f72d211ff..a57e7d63012b 100644
--- a/include/llvm/IR/InlineAsm.h
+++ b/include/llvm/IR/InlineAsm.h
@@ -95,7 +95,7 @@ public:
isClobber // '~x'
};
- typedef std::vector<std::string> ConstraintCodeVector;
+ using ConstraintCodeVector = std::vector<std::string>;
struct SubConstraintInfo {
/// MatchingInput - If this is not -1, this is an output constraint where an
@@ -112,9 +112,9 @@ public:
SubConstraintInfo() = default;
};
- typedef std::vector<SubConstraintInfo> SubConstraintInfoVector;
+ using SubConstraintInfoVector = std::vector<SubConstraintInfo>;
struct ConstraintInfo;
- typedef std::vector<ConstraintInfo> ConstraintInfoVector;
+ using ConstraintInfoVector = std::vector<ConstraintInfo>;
struct ConstraintInfo {
/// Type - The basic type of the constraint: input/output/clobber
diff --git a/include/llvm/IR/InstIterator.h b/include/llvm/IR/InstIterator.h
index 28fc473f1490..2988fc935dd5 100644
--- a/include/llvm/IR/InstIterator.h
+++ b/include/llvm/IR/InstIterator.h
@@ -31,20 +31,20 @@ namespace llvm {
// inst_iterator and const_inst_iterator's.
//
template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
- typedef BB_t BBty;
- typedef BB_i_t BBIty;
- typedef BI_t BIty;
- typedef II_t IIty;
+ using BBty = BB_t;
+ using BBIty = BB_i_t;
+ using BIty = BI_t;
+ using IIty = II_t;
BB_t *BBs; // BasicBlocksType
BB_i_t BB; // BasicBlocksType::iterator
BI_t BI; // BasicBlock::iterator
public:
- typedef std::bidirectional_iterator_tag iterator_category;
- typedef IIty value_type;
- typedef signed difference_type;
- typedef IIty* pointer;
- typedef IIty& reference;
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = IIty;
+ using difference_type = signed;
+ using pointer = IIty *;
+ using reference = IIty &;
// Default constructor
InstIterator() = default;
@@ -119,13 +119,15 @@ private:
}
};
-typedef InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
- BasicBlock::iterator, Instruction> inst_iterator;
-typedef InstIterator<const SymbolTableList<BasicBlock>,
- Function::const_iterator, BasicBlock::const_iterator,
- const Instruction> const_inst_iterator;
-typedef iterator_range<inst_iterator> inst_range;
-typedef iterator_range<const_inst_iterator> const_inst_range;
+using inst_iterator =
+ InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
+ BasicBlock::iterator, Instruction>;
+using const_inst_iterator =
+ InstIterator<const SymbolTableList<BasicBlock>,
+ Function::const_iterator, BasicBlock::const_iterator,
+ const Instruction>;
+using inst_range = iterator_range<inst_iterator>;
+using const_inst_range = iterator_range<const_inst_iterator>;
inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); }
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index 6795b029cce9..d16a5d318d78 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -1,4 +1,4 @@
-//===-- llvm/InstrTypes.h - Important Instruction subclasses ----*- C++ -*-===//
+//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -29,7 +29,9 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@@ -114,17 +116,17 @@ public:
template <class Term, class BB> // Successor Iterator
class SuccIterator : public std::iterator<std::random_access_iterator_tag, BB,
int, BB *, BB *> {
- typedef std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>
- super;
+ using super =
+ std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>;
public:
- typedef typename super::pointer pointer;
- typedef typename super::reference reference;
+ using pointer = typename super::pointer;
+ using reference = typename super::reference;
private:
Term TermInst;
unsigned idx;
- typedef SuccIterator<Term, BB> Self;
+ using Self = SuccIterator<Term, BB>;
inline bool index_is_valid(unsigned idx) {
return idx < TermInst->getNumSuccessors();
@@ -260,11 +262,11 @@ public:
}
};
- typedef SuccIterator<TerminatorInst *, BasicBlock> succ_iterator;
- typedef SuccIterator<const TerminatorInst *, const BasicBlock>
- succ_const_iterator;
- typedef iterator_range<succ_iterator> succ_range;
- typedef iterator_range<succ_const_iterator> succ_const_range;
+ using succ_iterator = SuccIterator<TerminatorInst *, BasicBlock>;
+ using succ_const_iterator =
+ SuccIterator<const TerminatorInst *, const BasicBlock>;
+ using succ_range = iterator_range<succ_iterator>;
+ using succ_const_range = iterator_range<succ_const_iterator>;
private:
inline succ_iterator succ_begin() { return succ_iterator(this); }
@@ -341,14 +343,16 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
class BinaryOperator : public Instruction {
protected:
- void init(BinaryOps iType);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, Instruction *InsertBefore);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd);
+ void init(BinaryOps iType);
+
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
+
BinaryOperator *cloneImpl() const;
public:
@@ -1125,8 +1129,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
//===----------------------------------------------------------------------===//
class FuncletPadInst : public Instruction {
private:
- void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
-
FuncletPadInst(const FuncletPadInst &CPI);
explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
@@ -1136,11 +1138,14 @@ private:
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
+ void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
friend class CatchPadInst;
friend class CleanupPadInst;
+
FuncletPadInst *cloneImpl() const;
public:
@@ -1261,7 +1266,8 @@ public:
ArrayRef<InputTy> inputs() const { return Inputs; }
- typedef typename std::vector<InputTy>::const_iterator input_iterator;
+ using input_iterator = typename std::vector<InputTy>::const_iterator;
+
size_t input_size() const { return Inputs.size(); }
input_iterator input_begin() const { return Inputs.begin(); }
input_iterator input_end() const { return Inputs.end(); }
@@ -1269,8 +1275,8 @@ public:
StringRef getTag() const { return Tag; }
};
-typedef OperandBundleDefT<Value *> OperandBundleDef;
-typedef OperandBundleDefT<const Value *> ConstOperandBundleDef;
+using OperandBundleDef = OperandBundleDefT<Value *>;
+using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
/// \brief A mixin to add operand bundle functionality to llvm instruction
/// classes.
@@ -1553,8 +1559,8 @@ protected:
return OperandBundleUse(BOI.Tag, Inputs);
}
- typedef BundleOpInfo *bundle_op_iterator;
- typedef const BundleOpInfo *const_bundle_op_iterator;
+ using bundle_op_iterator = BundleOpInfo *;
+ using const_bundle_op_iterator = const BundleOpInfo *;
/// \brief Return the start of the list of BundleOpInfo instances associated
/// with this OperandBundleUser.
@@ -1654,6 +1660,6 @@ protected:
}
};
-} // end llvm namespace
+} // end namespace llvm
#endif // LLVM_IR_INSTRTYPES_H
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index cf7e5d8758a9..7b78d4d3d34a 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -795,6 +795,14 @@ def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
+// Xray intrinsics
+//===----------------------------------------------------------------------===//
+// Custom event logging for x-ray.
+// Takes a pointer to a string and the length of the string.
+def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+ [NoCapture<0>, ReadOnly<0>, IntrWriteMem]>;
+//===----------------------------------------------------------------------===//
+
//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
//
diff --git a/include/llvm/IR/IntrinsicsARM.td b/include/llvm/IR/IntrinsicsARM.td
index 18ed24be56d4..fe3861301689 100644
--- a/include/llvm/IR/IntrinsicsARM.td
+++ b/include/llvm/IR/IntrinsicsARM.td
@@ -22,12 +22,26 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
// and return value are essentially chains, used to force ordering during ISel.
def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// 16-bit multiplications
+def int_arm_smulbb : GCCBuiltin<"__builtin_arm_smulbb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulbt : GCCBuiltin<"__builtin_arm_smulbt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smultb : GCCBuiltin<"__builtin_arm_smultb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smultt : GCCBuiltin<"__builtin_arm_smultt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulwb : GCCBuiltin<"__builtin_arm_smulwb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smulwt : GCCBuiltin<"__builtin_arm_smulwt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
//===----------------------------------------------------------------------===//
// Saturating Arithmetic
def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, Commutative]>;
+ [Commutative, IntrNoMem]>;
def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
@@ -35,6 +49,176 @@ def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Accumulating multiplications
+def int_arm_smlabb : GCCBuiltin<"__builtin_arm_smlabb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlabt : GCCBuiltin<"__builtin_arm_smlabt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlatb : GCCBuiltin<"__builtin_arm_smlatb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlatt : GCCBuiltin<"__builtin_arm_smlatt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlawb : GCCBuiltin<"__builtin_arm_smlawb">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlawt : GCCBuiltin<"__builtin_arm_smlawt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+// Parallel 16-bit saturation
+def int_arm_ssat16 : GCCBuiltin<"__builtin_arm_ssat16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usat16 : GCCBuiltin<"__builtin_arm_usat16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// Packing and unpacking
+def int_arm_sxtab16 : GCCBuiltin<"__builtin_arm_sxtab16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_sxtb16 : GCCBuiltin<"__builtin_arm_sxtb16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uxtab16 : GCCBuiltin<"__builtin_arm_uxtab16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uxtb16 : GCCBuiltin<"__builtin_arm_uxtb16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+// Parallel selection, reads the GE flags.
+def int_arm_sel : GCCBuiltin<"__builtin_arm_sel">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
+
+// Parallel 8-bit addition and subtraction
+def int_arm_qadd8 : GCCBuiltin<"__builtin_arm_qadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsub8 : GCCBuiltin<"__builtin_arm_qsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_sadd8 : GCCBuiltin<"__builtin_arm_sadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_shadd8 : GCCBuiltin<"__builtin_arm_shadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsub8 : GCCBuiltin<"__builtin_arm_shsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_ssub8 : GCCBuiltin<"__builtin_arm_ssub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uadd8 : GCCBuiltin<"__builtin_arm_uadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_uhadd8 : GCCBuiltin<"__builtin_arm_uhadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsub8 : GCCBuiltin<"__builtin_arm_uhsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqadd8 : GCCBuiltin<"__builtin_arm_uqadd8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsub8 : GCCBuiltin<"__builtin_arm_uqsub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_usub8 : GCCBuiltin<"__builtin_arm_usub8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+// Sum of 8-bit absolute differences
+def int_arm_usad8 : GCCBuiltin<"__builtin_arm_usad8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usada8 : GCCBuiltin<"__builtin_arm_usada8">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+// Parallel 16-bit addition and subtraction
+def int_arm_qadd16 : GCCBuiltin<"__builtin_arm_qadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qasx : GCCBuiltin<"__builtin_arm_qasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsax : GCCBuiltin<"__builtin_arm_qsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_qsub16 : GCCBuiltin<"__builtin_arm_qsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_sadd16 : GCCBuiltin<"__builtin_arm_sadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_sasx : GCCBuiltin<"__builtin_arm_sasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_shadd16 : GCCBuiltin<"__builtin_arm_shadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shasx : GCCBuiltin<"__builtin_arm_shasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsax : GCCBuiltin<"__builtin_arm_shsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_shsub16 : GCCBuiltin<"__builtin_arm_shsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_ssax : GCCBuiltin<"__builtin_arm_ssax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_ssub16 : GCCBuiltin<"__builtin_arm_ssub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uadd16 : GCCBuiltin<"__builtin_arm_uadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_uasx : GCCBuiltin<"__builtin_arm_uasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_uhadd16 : GCCBuiltin<"__builtin_arm_uhadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhasx : GCCBuiltin<"__builtin_arm_uhasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsax : GCCBuiltin<"__builtin_arm_uhsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uhsub16 : GCCBuiltin<"__builtin_arm_uhsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqadd16 : GCCBuiltin<"__builtin_arm_uqadd16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqasx : GCCBuiltin<"__builtin_arm_uqasx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsax : GCCBuiltin<"__builtin_arm_uqsax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_uqsub16 : GCCBuiltin<"__builtin_arm_uqsub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+// Writes to the GE bits.
+def int_arm_usax : GCCBuiltin<"__builtin_arm_usax">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+// Writes to the GE bits.
+def int_arm_usub16 : GCCBuiltin<"__builtin_arm_usub16">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+// Parallel 16-bit multiplication
+def int_arm_smlad : GCCBuiltin<"__builtin_arm_smlad">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smladx : GCCBuiltin<"__builtin_arm_smladx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlald : GCCBuiltin<"__builtin_arm_smlald">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smlaldx : GCCBuiltin<"__builtin_arm_smlaldx">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smlsd : GCCBuiltin<"__builtin_arm_smlsd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlsdx : GCCBuiltin<"__builtin_arm_smlsdx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_smlsld : GCCBuiltin<"__builtin_arm_smlsld">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smlsldx : GCCBuiltin<"__builtin_arm_smlsldx">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_arm_smuad : GCCBuiltin<"__builtin_arm_smuad">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smuadx : GCCBuiltin<"__builtin_arm_smuadx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smusd : GCCBuiltin<"__builtin_arm_smusd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_smusdx : GCCBuiltin<"__builtin_arm_smusdx">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+
//===----------------------------------------------------------------------===//
// Load, Store and Clear exclusive
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index a7274fbfbced..53570bdf16f4 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -45,58 +45,54 @@ struct CalleeInfo {
}
};
-/// Struct to hold value either by GUID or GlobalValue*. Values in combined
-/// indexes as well as indirect calls are GUIDs, all others are GlobalValues.
-struct ValueInfo {
- /// The value representation used in this instance.
- enum ValueInfoKind {
- VI_GUID,
- VI_Value,
- };
+class GlobalValueSummary;
- /// Union of the two possible value types.
- union ValueUnion {
- GlobalValue::GUID Id;
- const GlobalValue *GV;
- ValueUnion(GlobalValue::GUID Id) : Id(Id) {}
- ValueUnion(const GlobalValue *GV) : GV(GV) {}
- };
+typedef std::vector<std::unique_ptr<GlobalValueSummary>> GlobalValueSummaryList;
- /// The value being represented.
- ValueUnion TheValue;
- /// The value representation.
- ValueInfoKind Kind;
- /// Constructor for a GUID value
- ValueInfo(GlobalValue::GUID Id = 0) : TheValue(Id), Kind(VI_GUID) {}
- /// Constructor for a GlobalValue* value
- ValueInfo(const GlobalValue *V) : TheValue(V), Kind(VI_Value) {}
- /// Accessor for GUID value
- GlobalValue::GUID getGUID() const {
- assert(Kind == VI_GUID && "Not a GUID type");
- return TheValue.Id;
- }
- /// Accessor for GlobalValue* value
- const GlobalValue *getValue() const {
- assert(Kind == VI_Value && "Not a Value type");
- return TheValue.GV;
- }
- bool isGUID() const { return Kind == VI_GUID; }
+struct GlobalValueSummaryInfo {
+ /// The GlobalValue corresponding to this summary. This is only used in
+ /// per-module summaries.
+ const GlobalValue *GV = nullptr;
+
+ /// List of global value summary structures for a particular value held
+ /// in the GlobalValueMap. Requires a vector in the case of multiple
+ /// COMDAT values of the same name.
+ GlobalValueSummaryList SummaryList;
};
-template <> struct DenseMapInfo<ValueInfo> {
- static inline ValueInfo getEmptyKey() { return ValueInfo((GlobalValue *)-1); }
- static inline ValueInfo getTombstoneKey() {
- return ValueInfo((GlobalValue *)-2);
+/// Map from global value GUID to corresponding summary structures. Use a
+/// std::map rather than a DenseMap so that pointers to the map's value_type
+/// (which are used by ValueInfo) are not invalidated by insertion. Also it will
+/// likely incur less overhead, as the value type is not very small and the size
+/// of the map is unknown, resulting in inefficiencies due to repeated
+/// insertions and resizing.
+typedef std::map<GlobalValue::GUID, GlobalValueSummaryInfo>
+ GlobalValueSummaryMapTy;
+
+/// Struct that holds a reference to a particular GUID in a global value
+/// summary.
+struct ValueInfo {
+ const GlobalValueSummaryMapTy::value_type *Ref = nullptr;
+ ValueInfo() = default;
+ ValueInfo(const GlobalValueSummaryMapTy::value_type *Ref) : Ref(Ref) {}
+ operator bool() const { return Ref; }
+
+ GlobalValue::GUID getGUID() const { return Ref->first; }
+ const GlobalValue *getValue() const { return Ref->second.GV; }
+ ArrayRef<std::unique_ptr<GlobalValueSummary>> getSummaryList() const {
+ return Ref->second.SummaryList;
}
- static bool isEqual(ValueInfo L, ValueInfo R) {
- if (L.isGUID() != R.isGUID())
- return false;
- return L.isGUID() ? (L.getGUID() == R.getGUID())
- : (L.getValue() == R.getValue());
+};
+
+template <> struct DenseMapInfo<ValueInfo> {
+ static inline ValueInfo getEmptyKey() {
+ return ValueInfo((GlobalValueSummaryMapTy::value_type *)-1);
}
- static unsigned getHashValue(ValueInfo I) {
- return I.isGUID() ? I.getGUID() : (uintptr_t)I.getValue();
+ static inline ValueInfo getTombstoneKey() {
+ return ValueInfo((GlobalValueSummaryMapTy::value_type *)-2);
}
+ static bool isEqual(ValueInfo L, ValueInfo R) { return L.Ref == R.Ref; }
+ static unsigned getHashValue(ValueInfo I) { return (uintptr_t)I.Ref; }
};
/// \brief Function and variable summary information to aid decisions and
@@ -483,19 +479,6 @@ struct TypeIdSummary {
/// 160 bits SHA1
typedef std::array<uint32_t, 5> ModuleHash;
-/// List of global value summary structures for a particular value held
-/// in the GlobalValueMap. Requires a vector in the case of multiple
-/// COMDAT values of the same name.
-typedef std::vector<std::unique_ptr<GlobalValueSummary>> GlobalValueSummaryList;
-
-/// Map from global value GUID to corresponding summary structures.
-/// Use a std::map rather than a DenseMap since it will likely incur
-/// less overhead, as the value type is not very small and the size
-/// of the map is unknown, resulting in inefficiencies due to repeated
-/// insertions and resizing.
-typedef std::map<GlobalValue::GUID, GlobalValueSummaryList>
- GlobalValueSummaryMapTy;
-
/// Type used for iterating through the global value summary map.
typedef GlobalValueSummaryMapTy::const_iterator const_gvsummary_iterator;
typedef GlobalValueSummaryMapTy::iterator gvsummary_iterator;
@@ -532,6 +515,11 @@ private:
// YAML I/O support.
friend yaml::MappingTraits<ModuleSummaryIndex>;
+ GlobalValueSummaryMapTy::value_type *
+ getOrInsertValuePtr(GlobalValue::GUID GUID) {
+ return &*GlobalValueMap.emplace(GUID, GlobalValueSummaryInfo{}).first;
+ }
+
public:
gvsummary_iterator begin() { return GlobalValueMap.begin(); }
const_gvsummary_iterator begin() const { return GlobalValueMap.begin(); }
@@ -539,21 +527,22 @@ public:
const_gvsummary_iterator end() const { return GlobalValueMap.end(); }
size_t size() const { return GlobalValueMap.size(); }
- /// Get the list of global value summary objects for a given value name.
- const GlobalValueSummaryList &getGlobalValueSummaryList(StringRef ValueName) {
- return GlobalValueMap[GlobalValue::getGUID(ValueName)];
+ /// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
+ ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
+ auto I = GlobalValueMap.find(GUID);
+ return ValueInfo(I == GlobalValueMap.end() ? nullptr : &*I);
}
- /// Get the list of global value summary objects for a given value name.
- const const_gvsummary_iterator
- findGlobalValueSummaryList(StringRef ValueName) const {
- return GlobalValueMap.find(GlobalValue::getGUID(ValueName));
+ /// Return a ValueInfo for \p GUID.
+ ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID) {
+ return ValueInfo(getOrInsertValuePtr(GUID));
}
- /// Get the list of global value summary objects for a given value GUID.
- const const_gvsummary_iterator
- findGlobalValueSummaryList(GlobalValue::GUID ValueGUID) const {
- return GlobalValueMap.find(ValueGUID);
+ /// Return a ValueInfo for \p GV and mark it as belonging to GV.
+ ValueInfo getOrInsertValueInfo(const GlobalValue *GV) {
+ auto VP = getOrInsertValuePtr(GV->getGUID());
+ VP->second.GV = GV;
+ return ValueInfo(VP);
}
/// Return the GUID for \p OriginalId in the OidGuidMap.
@@ -565,17 +554,18 @@ public:
/// Add a global value summary for a value of the given name.
void addGlobalValueSummary(StringRef ValueName,
std::unique_ptr<GlobalValueSummary> Summary) {
- addOriginalName(GlobalValue::getGUID(ValueName),
- Summary->getOriginalName());
- GlobalValueMap[GlobalValue::getGUID(ValueName)].push_back(
- std::move(Summary));
+ addGlobalValueSummary(getOrInsertValueInfo(GlobalValue::getGUID(ValueName)),
+ std::move(Summary));
}
- /// Add a global value summary for a value of the given GUID.
- void addGlobalValueSummary(GlobalValue::GUID ValueGUID,
+ /// Add a global value summary for the given ValueInfo.
+ void addGlobalValueSummary(ValueInfo VI,
std::unique_ptr<GlobalValueSummary> Summary) {
- addOriginalName(ValueGUID, Summary->getOriginalName());
- GlobalValueMap[ValueGUID].push_back(std::move(Summary));
+ addOriginalName(VI.getGUID(), Summary->getOriginalName());
+ // Here we have a notionally const VI, but the value it points to is owned
+ // by the non-const *this.
+ const_cast<GlobalValueSummaryMapTy::value_type *>(VI.Ref)
+ ->second.SummaryList.push_back(std::move(Summary));
}
/// Add an original name for the value of the given GUID.
@@ -593,16 +583,16 @@ public:
/// not found.
GlobalValueSummary *findSummaryInModule(GlobalValue::GUID ValueGUID,
StringRef ModuleId) const {
- auto CalleeInfoList = findGlobalValueSummaryList(ValueGUID);
- if (CalleeInfoList == end()) {
+ auto CalleeInfo = getValueInfo(ValueGUID);
+ if (!CalleeInfo) {
return nullptr; // This function does not have a summary
}
auto Summary =
- llvm::find_if(CalleeInfoList->second,
+ llvm::find_if(CalleeInfo.getSummaryList(),
[&](const std::unique_ptr<GlobalValueSummary> &Summary) {
return Summary->modulePath() == ModuleId;
});
- if (Summary == CalleeInfoList->second.end())
+ if (Summary == CalleeInfo.getSummaryList().end())
return nullptr;
return Summary->get();
}
diff --git a/include/llvm/IR/ModuleSummaryIndexYAML.h b/include/llvm/IR/ModuleSummaryIndexYAML.h
index 80719c696935..78fdb602027d 100644
--- a/include/llvm/IR/ModuleSummaryIndexYAML.h
+++ b/include/llvm/IR/ModuleSummaryIndexYAML.h
@@ -201,7 +201,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
for (auto &FSum : FSums) {
GlobalValueSummary::GVFlags GVFlags(GlobalValue::ExternalLinkage, false,
false);
- Elem.push_back(llvm::make_unique<FunctionSummary>(
+ Elem.SummaryList.push_back(llvm::make_unique<FunctionSummary>(
GVFlags, 0, ArrayRef<ValueInfo>{},
ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
std::move(FSum.TypeTestAssumeVCalls),
@@ -213,7 +213,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
static void output(IO &io, GlobalValueSummaryMapTy &V) {
for (auto &P : V) {
std::vector<FunctionSummaryYaml> FSums;
- for (auto &Sum : P.second) {
+ for (auto &Sum : P.second.SummaryList) {
if (auto *FSum = dyn_cast<FunctionSummary>(Sum.get()))
FSums.push_back(FunctionSummaryYaml{
FSum->type_tests(), FSum->type_test_assume_vcalls(),
diff --git a/include/llvm/MC/ConstantPools.h b/include/llvm/MC/ConstantPools.h
index c34211c2bd12..5d4e32a672dd 100644
--- a/include/llvm/MC/ConstantPools.h
+++ b/include/llvm/MC/ConstantPools.h
@@ -63,6 +63,8 @@ public:
// Return true if the constant pool is empty
bool empty();
+
+ void clearCache();
};
class AssemblerConstantPools {
@@ -86,6 +88,7 @@ class AssemblerConstantPools {
public:
void emitAll(MCStreamer &Streamer);
void emitForCurrentSection(MCStreamer &Streamer);
+ void clearCacheForCurrentSection(MCStreamer &Streamer);
const MCExpr *addEntry(MCStreamer &Streamer, const MCExpr *Expr,
unsigned Size, SMLoc Loc);
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index 1b6aaf4be666..8b9b49737170 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -20,7 +20,9 @@
#include "llvm/Object/Binary.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/COFF.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
@@ -40,6 +42,7 @@ class DelayImportDirectoryEntryRef;
class ExportDirectoryEntryRef;
class ImportDirectoryEntryRef;
class ImportedSymbolRef;
+class ResourceSectionRef;
using import_directory_iterator = content_iterator<ImportDirectoryEntryRef>;
using delay_import_directory_iterator =
@@ -623,6 +626,26 @@ struct coff_base_reloc_block_entry {
int getOffset() const { return Data & ((1 << 12) - 1); }
};
+struct coff_resource_dir_entry {
+ union {
+ support::ulittle32_t NameOffset;
+ support::ulittle32_t ID;
+ uint32_t getNameOffset() const {
+ return maskTrailingOnes<uint32_t>(31) & NameOffset;
+ }
+ } Identifier;
+ union {
+ support::ulittle32_t DataEntryOffset;
+ support::ulittle32_t SubdirOffset;
+
+ bool isSubDir() const { return SubdirOffset >> 31; }
+ uint32_t value() const {
+ return maskTrailingOnes<uint32_t>(31) & SubdirOffset;
+ }
+
+ } Offset;
+};
+
struct coff_resource_dir_table {
support::ulittle32_t Characteristics;
support::ulittle32_t TimeDateStamp;
@@ -1047,6 +1070,23 @@ private:
const COFFObjectFile *OwningObject = nullptr;
};
+class ResourceSectionRef {
+public:
+ ResourceSectionRef() = default;
+ explicit ResourceSectionRef(StringRef Ref) : BBS(Ref, support::little) {}
+
+ ErrorOr<ArrayRef<UTF16>> getEntryNameString(const coff_resource_dir_entry &Entry);
+ ErrorOr<const coff_resource_dir_table &>
+ getEntrySubDir(const coff_resource_dir_entry &Entry);
+ ErrorOr<const coff_resource_dir_table &> getBaseTable();
+
+private:
+ BinaryByteStream BBS;
+
+ ErrorOr<const coff_resource_dir_table &> getTableAtOffset(uint32_t Offset);
+ ErrorOr<ArrayRef<UTF16>> getDirStringAtOffset(uint32_t Offset);
+};
+
// Corresponds to `_FPO_DATA` structure in the PE/COFF spec.
struct FpoData {
support::ulittle32_t Offset; // ulOffStart: Offset 1st byte of function code
diff --git a/include/llvm/Object/Wasm.h b/include/llvm/Object/Wasm.h
index 6b6bbe252f65..4bc39d98b7af 100644
--- a/include/llvm/Object/Wasm.h
+++ b/include/llvm/Object/Wasm.h
@@ -41,10 +41,14 @@ public:
DEBUG_FUNCTION_NAME,
};
- WasmSymbol(StringRef Name, SymbolType Type) : Name(Name), Type(Type) {}
+ WasmSymbol(StringRef Name, SymbolType Type, uint32_t Section,
+ uint32_t ElementIndex)
+ : Name(Name), Type(Type), Section(Section), ElementIndex(ElementIndex) {}
StringRef Name;
SymbolType Type;
+ uint32_t Section;
+ uint32_t ElementIndex;
};
class WasmSection {
diff --git a/include/llvm/ObjectYAML/WasmYAML.h b/include/llvm/ObjectYAML/WasmYAML.h
index dfeeb8589f82..bd7d72be4dbc 100644
--- a/include/llvm/ObjectYAML/WasmYAML.h
+++ b/include/llvm/ObjectYAML/WasmYAML.h
@@ -97,6 +97,11 @@ struct DataSegment {
yaml::BinaryRef Content;
};
+struct NameEntry {
+ uint32_t Index;
+ StringRef Name;
+};
+
struct Signature {
Signature() : Form(wasm::WASM_TYPE_FUNC) {}
@@ -122,6 +127,11 @@ struct CustomSection : Section {
StringRef Name;
yaml::BinaryRef Payload;
+
+ // The following is used by the "name" custom section.
+ // TODO(sbc): Add support for more than just function names. The wasm
+ // name section can support multiple sub-sections.
+ std::vector<NameEntry> FunctionNames;
};
struct TypeSection : Section {
@@ -244,6 +254,7 @@ LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Global)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Function)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::LocalDecl)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Relocation)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::NameEntry)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(uint32_t)
namespace llvm {
@@ -297,6 +308,10 @@ template <> struct MappingTraits<WasmYAML::Relocation> {
static void mapping(IO &IO, WasmYAML::Relocation &Relocation);
};
+template <> struct MappingTraits<WasmYAML::NameEntry> {
+ static void mapping(IO &IO, WasmYAML::NameEntry &NameEntry);
+};
+
template <> struct MappingTraits<WasmYAML::LocalDecl> {
static void mapping(IO &IO, WasmYAML::LocalDecl &LocalDecl);
};
diff --git a/include/llvm/Support/AArch64TargetParser.def b/include/llvm/Support/AArch64TargetParser.def
index 1700deadeaef..8eccebcd932a 100644
--- a/include/llvm/Support/AArch64TargetParser.def
+++ b/include/llvm/Support/AArch64TargetParser.def
@@ -20,8 +20,7 @@ AARCH64_ARCH("invalid", AK_INVALID, nullptr, nullptr,
ARMBuildAttrs::CPUArch::v8_A, FK_NONE, AArch64::AEK_NONE)
AARCH64_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
FK_CRYPTO_NEON_FP_ARMV8,
- (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
- AArch64::AEK_SIMD))
+ (AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD))
AARCH64_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
@@ -52,38 +51,37 @@ AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
#endif
AARCH64_CPU_NAME("cortex-a35", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a53", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, true,
- ( AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a57", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a72", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a73", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cyclone", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_NONE))
AARCH64_CPU_NAME("exynos-m1", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m2", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m3", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("falkor", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("kryo", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
+ (AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderx2t99", AK_ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_CRC |
- AArch64::AEK_CRYPTO))
+ (AArch64::AEK_NONE))
AARCH64_CPU_NAME("thunderx", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt88", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt81", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt83", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
- (AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
+ (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
// Invalid CPU
AARCH64_CPU_NAME("invalid", AK_INVALID, FK_INVALID, true, AArch64::AEK_INVALID)
#undef AARCH64_CPU_NAME
diff --git a/include/llvm/Support/BinaryStreamArray.h b/include/llvm/Support/BinaryStreamArray.h
index f141c30f16c7..bad31cd38d6a 100644
--- a/include/llvm/Support/BinaryStreamArray.h
+++ b/include/llvm/Support/BinaryStreamArray.h
@@ -64,8 +64,10 @@ class VarStreamArrayIterator
public:
VarStreamArrayIterator() = default;
VarStreamArrayIterator(const ArrayType &Array, const WrappedCtx &Ctx,
- BinaryStreamRef Stream, bool *HadError = nullptr)
- : IterRef(Stream), Ctx(&Ctx), Array(&Array), HadError(HadError) {
+ BinaryStreamRef Stream, bool *HadError = nullptr,
+ uint32_t Offset = 0)
+ : IterRef(Stream), Ctx(&Ctx), Array(&Array), AbsOffset(Offset),
+ HadError(HadError) {
if (IterRef.getLength() == 0)
moveToEnd();
else {
@@ -115,6 +117,7 @@ public:
for (unsigned I = 0; I < N; ++I) {
// We are done with the current record, discard it so that we are
// positioned at the next record.
+ AbsOffset += ThisLen;
IterRef = IterRef.drop_front(ThisLen);
if (IterRef.getLength() == 0) {
// There is nothing after the current record, we must make this an end
@@ -135,6 +138,8 @@ public:
return *this;
}
+ uint32_t offset() const { return AbsOffset; }
+
private:
void moveToEnd() {
Array = nullptr;
@@ -152,6 +157,7 @@ private:
const WrappedCtx *Ctx{nullptr};
const ArrayType *Array{nullptr};
uint32_t ThisLen{0};
+ uint32_t AbsOffset{0};
bool HasError{false};
bool *HadError{nullptr};
};
@@ -234,7 +240,7 @@ public:
/// since the behavior is undefined if \p Offset does not refer to the
/// beginning of a valid record.
Iterator at(uint32_t Offset) const {
- return Iterator(*this, Ctx, Stream.drop_front(Offset), nullptr);
+ return Iterator(*this, Ctx, Stream.drop_front(Offset), nullptr, Offset);
}
BinaryStreamRef getUnderlyingStream() const { return Stream; }
@@ -338,7 +344,7 @@ private:
template <typename T>
class FixedStreamArrayIterator
: public iterator_facade_base<FixedStreamArrayIterator<T>,
- std::random_access_iterator_tag, T> {
+ std::random_access_iterator_tag, const T> {
public:
FixedStreamArrayIterator(const FixedStreamArray<T> &Array, uint32_t Index)
@@ -352,6 +358,7 @@ public:
}
const T &operator*() const { return Array[Index]; }
+ const T &operator*() { return Array[Index]; }
bool operator==(const FixedStreamArrayIterator<T> &R) const {
assert(Array == R.Array);
diff --git a/include/llvm/Support/COFF.h b/include/llvm/Support/COFF.h
index 19223306bd07..bc2098e2b5cf 100644
--- a/include/llvm/Support/COFF.h
+++ b/include/llvm/Support/COFF.h
@@ -152,6 +152,30 @@ namespace COFF {
IMAGE_FILE_BYTES_REVERSED_HI = 0x8000
};
+ enum ResourceTypeID {
+ RID_Cursor = 1,
+ RID_Bitmap = 2,
+ RID_Icon = 3,
+ RID_Menu = 4,
+ RID_Dialog = 5,
+ RID_String = 6,
+ RID_FontDir = 7,
+ RID_Font = 8,
+ RID_Accelerator = 9,
+ RID_RCData = 10,
+ RID_MessageTable = 11,
+ RID_Group_Cursor = 12,
+ RID_Group_Icon = 14,
+ RID_Version = 16,
+ RID_DLGInclude = 17,
+ RID_PlugPlay = 19,
+ RID_VXD = 20,
+ RID_AniCursor = 21,
+ RID_AniIcon = 22,
+ RID_HTML = 23,
+ RID_Manifest = 24,
+ };
+
struct symbol {
char Name[NameSize];
uint32_t Value;
@@ -349,6 +373,26 @@ namespace COFF {
IMAGE_REL_ARM_BLX23T = 0x0015
};
+ enum RelocationTypesARM64 {
+ IMAGE_REL_ARM64_ABSOLUTE = 0x0000,
+ IMAGE_REL_ARM64_ADDR32 = 0x0001,
+ IMAGE_REL_ARM64_ADDR32NB = 0x0002,
+ IMAGE_REL_ARM64_BRANCH26 = 0x0003,
+ IMAGE_REL_ARM64_PAGEBASE_REL2 = 0x0004,
+ IMAGE_REL_ARM64_REL21 = 0x0005,
+ IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006,
+ IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007,
+ IMAGE_REL_ARM64_SECREL = 0x0008,
+ IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009,
+ IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A,
+ IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B,
+ IMAGE_REL_ARM64_TOKEN = 0x000C,
+ IMAGE_REL_ARM64_SECTION = 0x000D,
+ IMAGE_REL_ARM64_ADDR64 = 0x000E,
+ IMAGE_REL_ARM64_BRANCH19 = 0x000F,
+ IMAGE_REL_ARM64_BRANCH14 = 0x0010,
+ };
+
enum COMDATType {
IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
IMAGE_COMDAT_SELECT_ANY,
diff --git a/include/llvm/Support/KnownBits.h b/include/llvm/Support/KnownBits.h
index 292ea9e4b717..3d38cf878538 100644
--- a/include/llvm/Support/KnownBits.h
+++ b/include/llvm/Support/KnownBits.h
@@ -24,6 +24,12 @@ struct KnownBits {
APInt Zero;
APInt One;
+private:
+ // Internal constructor for creating a KnownBits from two APInts.
+ KnownBits(APInt Zero, APInt One)
+ : Zero(std::move(Zero)), One(std::move(One)) {}
+
+public:
// Default construct Zero and One.
KnownBits() {}
@@ -37,6 +43,55 @@ struct KnownBits {
return Zero.getBitWidth();
}
+ /// Returns true if there is conflicting information.
+ bool hasConflict() const { return Zero.intersects(One); }
+
+ /// Returns true if we know the value of all bits.
+ bool isConstant() const {
+ assert(!hasConflict() && "KnownBits conflict!");
+ return Zero.countPopulation() + One.countPopulation() == getBitWidth();
+ }
+
+ /// Returns the value when all bits have a known value. This just returns One
+ /// with a protective assertion.
+ const APInt &getConstant() const {
+ assert(isConstant() && "Can only get value when all bits are known");
+ return One;
+ }
+
+ /// Returns true if we don't know any bits.
+ bool isUnknown() const { return Zero.isNullValue() && One.isNullValue(); }
+
+ /// Resets the known state of all bits.
+ void resetAll() {
+ Zero.clearAllBits();
+ One.clearAllBits();
+ }
+
+ /// Returns true if value is all zero.
+ bool isZero() const {
+ assert(!hasConflict() && "KnownBits conflict!");
+ return Zero.isAllOnesValue();
+ }
+
+ /// Returns true if value is all one bits.
+ bool isAllOnes() const {
+ assert(!hasConflict() && "KnownBits conflict!");
+ return One.isAllOnesValue();
+ }
+
+ /// Make all bits known to be zero and discard any previous information.
+ void setAllZero() {
+ Zero.setAllBits();
+ One.clearAllBits();
+ }
+
+ /// Make all bits known to be one and discard any previous information.
+ void setAllOnes() {
+ Zero.clearAllBits();
+ One.setAllBits();
+ }
+
/// Returns true if this value is known to be negative.
bool isNegative() const { return One.isSignBitSet(); }
@@ -54,6 +109,30 @@ struct KnownBits {
assert(!isNegative() && "Can't make a negative value non-negative");
Zero.setSignBit();
}
+
+ /// Truncate the underlying known Zero and One bits. This is equivalent
+ /// to truncating the value we're tracking.
+ KnownBits trunc(unsigned BitWidth) {
+ return KnownBits(Zero.trunc(BitWidth), One.trunc(BitWidth));
+ }
+
+ /// Zero extends the underlying known Zero and One bits. This is equivalent
+ /// to zero extending the value we're tracking.
+ KnownBits zext(unsigned BitWidth) {
+ return KnownBits(Zero.zext(BitWidth), One.zext(BitWidth));
+ }
+
+ /// Sign extends the underlying known Zero and One bits. This is equivalent
+ /// to sign extending the value we're tracking.
+ KnownBits sext(unsigned BitWidth) {
+ return KnownBits(Zero.sext(BitWidth), One.sext(BitWidth));
+ }
+
+ /// Zero extends or truncates the underlying known Zero and One bits. This is
+ /// equivalent to zero extending or truncating the value we're tracking.
+ KnownBits zextOrTrunc(unsigned BitWidth) {
+ return KnownBits(Zero.zextOrTrunc(BitWidth), One.zextOrTrunc(BitWidth));
+ }
};
} // end namespace llvm
diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h
index 994456f9a681..7f07e8cc3a51 100644
--- a/include/llvm/Support/MathExtras.h
+++ b/include/llvm/Support/MathExtras.h
@@ -214,6 +214,18 @@ template <typename T> T maskLeadingOnes(unsigned N) {
return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
+/// \brief Create a bitmask with the N right-most bits set to 0, and all other
+/// bits set to 1. Only unsigned types are allowed.
+template <typename T> T maskTrailingZeros(unsigned N) {
+ return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
+
+/// \brief Create a bitmask with the N left-most bits set to 0, and all other
+/// bits set to 1. Only unsigned types are allowed.
+template <typename T> T maskLeadingZeros(unsigned N) {
+ return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
+}
+
/// \brief Get the index of the last set bit starting from the least
/// significant bit.
///
diff --git a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index 9f034220815f..a06c67fe814c 100644
--- a/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -25,25 +25,43 @@ class GINodeEquiv<Instruction i, SDNode node> {
SDNode Node = node;
}
-def : GINodeEquiv<G_ZEXT, zext>;
+// These are defined in the same order as the G_* instructions.
+def : GINodeEquiv<G_ANYEXT, anyext>;
def : GINodeEquiv<G_SEXT, sext>;
+def : GINodeEquiv<G_ZEXT, zext>;
+def : GINodeEquiv<G_TRUNC, trunc>;
+def : GINodeEquiv<G_BITCAST, bitconvert>;
+// G_INTTOPTR - SelectionDAG has no equivalent.
+// G_PTRTOINT - SelectionDAG has no equivalent.
+// G_CONSTANT - Not needed since constants aren't operators.
+// G_FCONSTANT - Not needed since constants aren't operators.
def : GINodeEquiv<G_ADD, add>;
def : GINodeEquiv<G_SUB, sub>;
def : GINodeEquiv<G_MUL, mul>;
-
+def : GINodeEquiv<G_SDIV, sdiv>;
+def : GINodeEquiv<G_UDIV, udiv>;
+def : GINodeEquiv<G_SREM, srem>;
+def : GINodeEquiv<G_UREM, urem>;
+def : GINodeEquiv<G_AND, and>;
def : GINodeEquiv<G_OR, or>;
def : GINodeEquiv<G_XOR, xor>;
-def : GINodeEquiv<G_AND, and>;
-
def : GINodeEquiv<G_SHL, shl>;
def : GINodeEquiv<G_LSHR, srl>;
def : GINodeEquiv<G_ASHR, sra>;
-
-def : GINodeEquiv<G_SDIV, sdiv>;
-def : GINodeEquiv<G_UDIV, udiv>;
-def : GINodeEquiv<G_SREM, srem>;
-def : GINodeEquiv<G_UREM, urem>;
-
+def : GINodeEquiv<G_SELECT, select>;
+def : GINodeEquiv<G_FNEG, fneg>;
+def : GINodeEquiv<G_FPEXT, fpextend>;
+def : GINodeEquiv<G_FPTRUNC, ftrunc>;
+def : GINodeEquiv<G_FPTOSI, fp_to_sint>;
+def : GINodeEquiv<G_FPTOUI, fp_to_uint>;
+def : GINodeEquiv<G_SITOFP, sint_to_fp>;
+def : GINodeEquiv<G_UITOFP, uint_to_fp>;
+def : GINodeEquiv<G_FADD, fadd>;
+def : GINodeEquiv<G_FSUB, fsub>;
+def : GINodeEquiv<G_FMUL, fmul>;
+def : GINodeEquiv<G_FDIV, fdiv>;
+def : GINodeEquiv<G_FREM, frem>;
+def : GINodeEquiv<G_FPOW, fpow>;
def : GINodeEquiv<G_BR, br>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index d7fbca93f59b..fc35b4527bc3 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -1002,6 +1002,16 @@ def PATCHABLE_TAIL_CALL : Instruction {
let hasSideEffects = 1;
let isReturn = 1;
}
+def PATCHABLE_EVENT_CALL : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins ptr_rc:$event, i8imm:$size);
+ let AsmString = "# XRay Custom Event Log.";
+ let usesCustomInserter = 1;
+ let isCall = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 1;
+}
def FENTRY_CALL : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins variable_ops);
diff --git a/include/llvm/Target/TargetOpcodes.def b/include/llvm/Target/TargetOpcodes.def
index 96db6e0a9769..36764249632d 100644
--- a/include/llvm/Target/TargetOpcodes.def
+++ b/include/llvm/Target/TargetOpcodes.def
@@ -182,6 +182,10 @@ HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_EXIT)
/// PATCHABLE_RET which specifically only works for return instructions.
HANDLE_TARGET_OPCODE(PATCHABLE_TAIL_CALL)
+/// Wraps a logging call and its arguments with nop sleds. At runtime, this can be
+/// patched to insert instrumentation instructions.
+HANDLE_TARGET_OPCODE(PATCHABLE_EVENT_CALL)
+
/// The following generic opcodes are not supposed to appear after ISel.
/// This is something we might want to relax, but for now, this is convenient
/// to produce diagnostics.
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h
index db6723da1e61..023d7af7f729 100644
--- a/include/llvm/Transforms/Instrumentation.h
+++ b/include/llvm/Transforms/Instrumentation.h
@@ -177,6 +177,7 @@ struct SanitizerCoverageOptions {
bool Use8bitCounters = false;
bool TracePC = false;
bool TracePCGuard = false;
+ bool NoPrune = false;
SanitizerCoverageOptions() = default;
};
diff --git a/include/llvm/Transforms/Scalar/Float2Int.h b/include/llvm/Transforms/Scalar/Float2Int.h
index a8042399fb08..206ee980109b 100644
--- a/include/llvm/Transforms/Scalar/Float2Int.h
+++ b/include/llvm/Transforms/Scalar/Float2Int.h
@@ -31,7 +31,7 @@ public:
private:
void findRoots(Function &F, SmallPtrSet<Instruction *, 8> &Roots);
- ConstantRange seen(Instruction *I, ConstantRange R);
+ void seen(Instruction *I, ConstantRange R);
ConstantRange badRange();
ConstantRange unknownRange();
ConstantRange validateRange(ConstantRange R);
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 863fbdba7e67..130e917e49d7 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -701,11 +701,10 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
return Op1;
}
- APInt KnownZero = Known0.Zero | Known1.Zero;
- APInt KnownOne = Known0.One & Known1.One;
- if ((KnownZero | KnownOne).isAllOnesValue()) {
- return ConstantInt::get(Op0->getType(), KnownOne);
- }
+ Known0.Zero |= Known1.Zero;
+ Known0.One &= Known1.One;
+ if (Known0.isConstant())
+ return ConstantInt::get(Op0->getType(), Known0.getConstant());
}
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 7aa6abf8fa48..4a713f441ce8 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -1495,36 +1495,87 @@ static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
-static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
+ ICmpInst::Predicate Pred0, Pred1;
+ Value *A ,*B;
+ if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
+ !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
+ return nullptr;
+
+ // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
+ // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
+ // can eliminate Op0 from this 'or'.
+ if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
+ return Op1;
+
+ // Check for any combination of predicates that cover the entire range of
+ // possibilities.
+ if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
+ (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
+ (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
+ (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
+ return getTrue(Op0->getType());
+
+ return nullptr;
+}
+
+/// Test if a pair of compares with a shared operand and 2 constants has an
+/// empty set intersection, full set union, or if one compare is a superset of
+/// the other.
+static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
+ bool IsAnd) {
+ // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
+ if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
+ return nullptr;
+
+ const APInt *C0, *C1;
+ if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
+ !match(Cmp1->getOperand(1), m_APInt(C1)))
+ return nullptr;
+
+ auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
+ auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
+
+ // For and-of-compares, check if the intersection is empty:
+ // (icmp X, C0) && (icmp X, C1) --> empty set --> false
+ if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
+ return getFalse(Cmp0->getType());
+
+ // For or-of-compares, check if the union is full:
+ // (icmp X, C0) || (icmp X, C1) --> full set --> true
+ if (!IsAnd && Range0.unionWith(Range1).isFullSet())
+ return getTrue(Cmp0->getType());
+
+ // Is one range a superset of the other?
+ // If this is and-of-compares, take the smaller set:
+ // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
+ // If this is or-of-compares, take the larger set:
+ // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
+ if (Range0.contains(Range1))
+ return IsAnd ? Cmp1 : Cmp0;
+ if (Range1.contains(Range0))
+ return IsAnd ? Cmp0 : Cmp1;
+
+ return nullptr;
+}
+
+/// Commuted variants are assumed to be handled by calling this function again
+/// with the parameters swapped.
+static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
return X;
if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
return X;
- // FIXME: This should be shared with or-of-icmps.
- // Look for this pattern: (icmp V, C0) & (icmp V, C1)).
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
+ return X;
+
+ // (icmp (add V, C0), C1) & (icmp V, C0)
Type *ITy = Op0->getType();
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
Value *V;
- if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
- match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
- // Make a constant range that's the intersection of the two icmp ranges.
- // If the intersection is empty, we know that the result is false.
- auto Range0 = ConstantRange::makeExactICmpRegion(Pred0, *C0);
- auto Range1 = ConstantRange::makeExactICmpRegion(Pred1, *C1);
- if (Range0.intersectWith(Range1).isEmptySet())
- return getFalse(ITy);
-
- // If a range is a superset of the other, the smaller set is all we need.
- if (Range0.contains(Range1))
- return Op1;
- if (Range1.contains(Range0))
- return Op0;
- }
-
- // (icmp (add V, C0), C1) & (icmp V, C0)
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
return nullptr;
@@ -1565,6 +1616,103 @@ static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
return nullptr;
}
+/// Commuted variants are assumed to be handled by calling this function again
+/// with the parameters swapped.
+static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+ if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
+ return X;
+
+ if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
+ return X;
+
+ if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
+ return X;
+
+ // (icmp (add V, C0), C1) | (icmp V, C0)
+ ICmpInst::Predicate Pred0, Pred1;
+ const APInt *C0, *C1;
+ Value *V;
+ if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
+ return nullptr;
+
+ if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
+ return nullptr;
+
+ auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
+ if (AddInst->getOperand(1) != Op1->getOperand(1))
+ return nullptr;
+
+ Type *ITy = Op0->getType();
+ bool isNSW = AddInst->hasNoSignedWrap();
+ bool isNUW = AddInst->hasNoUnsignedWrap();
+
+ const APInt Delta = *C1 - *C0;
+ if (C0->isStrictlyPositive()) {
+ if (Delta == 2) {
+ if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
+ return getTrue(ITy);
+ if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
+ return getTrue(ITy);
+ }
+ if (Delta == 1) {
+ if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
+ return getTrue(ITy);
+ if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
+ return getTrue(ITy);
+ }
+ }
+ if (C0->getBoolValue() && isNUW) {
+ if (Delta == 2)
+ if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ if (Delta == 1)
+ if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
+ return getTrue(ITy);
+ }
+
+ return nullptr;
+}
+
+static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1,
+ bool IsAnd, CastInst *Cast) {
+ Value *V =
+ IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
+ if (!V)
+ return nullptr;
+ if (!Cast)
+ return V;
+
+ // If we looked through casts, we can only handle a constant simplification
+ // because we are not allowed to create a cast instruction here.
+ if (auto *C = dyn_cast<Constant>(V))
+ return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType());
+
+ return nullptr;
+}
+
+static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) {
+ // Look through casts of the 'and' operands to find compares.
+ auto *Cast0 = dyn_cast<CastInst>(Op0);
+ auto *Cast1 = dyn_cast<CastInst>(Op1);
+ if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
+ Cast0->getSrcTy() == Cast1->getSrcTy()) {
+ Op0 = Cast0->getOperand(0);
+ Op1 = Cast1->getOperand(0);
+ }
+
+ auto *Cmp0 = dyn_cast<ICmpInst>(Op0);
+ auto *Cmp1 = dyn_cast<ICmpInst>(Op1);
+ if (!Cmp0 || !Cmp1)
+ return nullptr;
+
+ if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0))
+ return V;
+ if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0))
+ return V;
+
+ return nullptr;
+}
+
/// Given operands for an And, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
@@ -1615,32 +1763,8 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return Op1;
}
- if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
- if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
- if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS))
- return V;
- if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS))
- return V;
- }
- }
-
- // The compares may be hidden behind casts. Look through those and try the
- // same folds as above.
- auto *Cast0 = dyn_cast<CastInst>(Op0);
- auto *Cast1 = dyn_cast<CastInst>(Op1);
- if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
- Cast0->getSrcTy() == Cast1->getSrcTy()) {
- auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0));
- auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0));
- if (Cmp0 && Cmp1) {
- Instruction::CastOps CastOpc = Cast0->getOpcode();
- Type *ResultType = Cast0->getType();
- if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1)))
- return ConstantExpr::getCast(CastOpc, V, ResultType);
- if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0)))
- return ConstantExpr::getCast(CastOpc, V, ResultType);
- }
- }
+ if (Value *V = simplifyAndOrOfICmps(Op0, Op1, true))
+ return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
@@ -1678,86 +1802,6 @@ Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
}
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
- ICmpInst::Predicate Pred0, Pred1;
- Value *A ,*B;
- if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
- !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
- return nullptr;
-
- // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
- // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
- // can eliminate Op0 from this 'or'.
- if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
- return Op1;
-
- // Check for any combination of predicates that cover the entire range of
- // possibilities.
- if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
- (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
- (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
- (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
- return getTrue(Op0->getType());
-
- return nullptr;
-}
-
-/// Commuted variants are assumed to be handled by calling this function again
-/// with the parameters swapped.
-static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
- if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
- return X;
-
- if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
- return X;
-
- // (icmp (add V, C0), C1) | (icmp V, C0)
- ICmpInst::Predicate Pred0, Pred1;
- const APInt *C0, *C1;
- Value *V;
- if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
- return nullptr;
-
- if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
- return nullptr;
-
- auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
- if (AddInst->getOperand(1) != Op1->getOperand(1))
- return nullptr;
-
- Type *ITy = Op0->getType();
- bool isNSW = AddInst->hasNoSignedWrap();
- bool isNUW = AddInst->hasNoUnsignedWrap();
-
- const APInt Delta = *C1 - *C0;
- if (C0->isStrictlyPositive()) {
- if (Delta == 2) {
- if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
- return getTrue(ITy);
- if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
- return getTrue(ITy);
- }
- if (Delta == 1) {
- if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
- return getTrue(ITy);
- if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
- return getTrue(ITy);
- }
- }
- if (C0->getBoolValue() && isNUW) {
- if (Delta == 2)
- if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
- return getTrue(ITy);
- if (Delta == 1)
- if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
- return getTrue(ITy);
- }
-
- return nullptr;
-}
-
/// Given operands for an Or, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
@@ -1826,14 +1870,8 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
return Op0;
- if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
- if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
- if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS))
- return V;
- if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS))
- return V;
- }
- }
+ if (Value *V = simplifyAndOrOfICmps(Op0, Op1, false))
+ return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
@@ -4056,20 +4094,13 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
unsigned InVecNumElts = InVecTy->getVectorNumElements();
- auto *Op0Const = dyn_cast<Constant>(Op0);
- auto *Op1Const = dyn_cast<Constant>(Op1);
-
- // If all operands are constant, constant fold the shuffle.
- if (Op0Const && Op1Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
-
SmallVector<int, 32> Indices;
ShuffleVectorInst::getShuffleMask(Mask, Indices);
assert(MaskNumElts == Indices.size() &&
"Size of Indices not same as number of mask elements?");
- // If only one of the operands is constant, constant fold the shuffle if the
- // mask does not select elements from the variable operand.
+ // Canonicalization: If mask does not select elements from an input vector,
+ // replace that input vector with undef.
bool MaskSelects0 = false, MaskSelects1 = false;
for (unsigned i = 0; i != MaskNumElts; ++i) {
if (Indices[i] == -1)
@@ -4079,23 +4110,41 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
else
MaskSelects1 = true;
}
- if (!MaskSelects0 && Op1Const)
- return ConstantFoldShuffleVectorInstruction(UndefValue::get(InVecTy),
- Op1Const, Mask);
- if (!MaskSelects1 && Op0Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const,
- UndefValue::get(InVecTy), Mask);
+ if (!MaskSelects0)
+ Op0 = UndefValue::get(InVecTy);
+ if (!MaskSelects1)
+ Op1 = UndefValue::get(InVecTy);
+
+ auto *Op0Const = dyn_cast<Constant>(Op0);
+ auto *Op1Const = dyn_cast<Constant>(Op1);
+
+ // If all operands are constant, constant fold the shuffle.
+ if (Op0Const && Op1Const)
+ return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
+
+ // Canonicalization: if only one input vector is constant, it shall be the
+ // second one.
+ if (Op0Const && !Op1Const) {
+ std::swap(Op0, Op1);
+ for (int &Idx : Indices) {
+ if (Idx == -1)
+ continue;
+ Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
+ assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
+ "shufflevector mask index out of range");
+ }
+ Mask = ConstantDataVector::get(
+ Mask->getContext(),
+ makeArrayRef(reinterpret_cast<uint32_t *>(Indices.data()),
+ MaskNumElts));
+ }
// A shuffle of a splat is always the splat itself. Legal if the shuffle's
// value type is same as the input vectors' type.
if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
- if (!MaskSelects1 && RetTy == InVecTy &&
+ if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
OpShuf->getMask()->getSplatValue())
return Op0;
- if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op1))
- if (!MaskSelects0 && RetTy == InVecTy &&
- OpShuf->getMask()->getSplatValue())
- return Op1;
// Don't fold a shuffle with undef mask elements. This may get folded in a
// better way using demanded bits or other analysis.
@@ -4595,8 +4644,8 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
unsigned BitWidth = I->getType()->getScalarSizeInBits();
KnownBits Known(BitWidth);
computeKnownBits(I, Known, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
- if ((Known.Zero | Known.One).isAllOnesValue())
- Result = ConstantInt::get(I->getType(), Known.One);
+ if (Known.isConstant())
+ Result = ConstantInt::get(I->getType(), Known.getConstant());
}
/// If called on unreachable code, the above logic may report that the
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index a98383eaf4aa..a2b9015a8a1d 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -142,7 +142,7 @@ public:
return Val;
}
- ConstantRange getConstantRange() const {
+ const ConstantRange &getConstantRange() const {
assert(isConstantRange() &&
"Cannot get the constant-range of a non-constant-range!");
return Range;
@@ -250,7 +250,7 @@ public:
if (NewR.isFullSet())
markOverdefined();
else
- markConstantRange(NewR);
+ markConstantRange(std::move(NewR));
}
};
@@ -1079,8 +1079,8 @@ bool LazyValueInfoImpl::solveBlockValueSelect(LVILatticeVal &BBLV,
}
if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
- ConstantRange TrueCR = TrueVal.getConstantRange();
- ConstantRange FalseCR = FalseVal.getConstantRange();
+ const ConstantRange &TrueCR = TrueVal.getConstantRange();
+ const ConstantRange &FalseCR = FalseVal.getConstantRange();
Value *LHS = nullptr;
Value *RHS = nullptr;
SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
@@ -1649,7 +1649,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@@ -1686,7 +1686,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@@ -1712,7 +1712,7 @@ static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
ConstantInt *CI = dyn_cast<ConstantInt>(C);
if (!CI) return LazyValueInfo::Unknown;
- ConstantRange CR = Result.getConstantRange();
+ const ConstantRange &CR = Result.getConstantRange();
if (Pred == ICmpInst::ICMP_EQ) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::False;
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 598138246445..471ccb62970d 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -537,7 +537,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
unsigned BitWidth = V->getType()->getIntegerBitWidth();
KnownBits Known(BitWidth);
computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
- return Known.Zero.isAllOnesValue();
+ return Known.isZero();
}
// Per-component check doesn't work with zeroinitializer
@@ -558,7 +558,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
KnownBits Known(BitWidth);
computeKnownBits(Elem, Known, DL);
- if (Known.Zero.isAllOnesValue())
+ if (Known.isZero())
return true;
}
diff --git a/lib/Analysis/ModuleSummaryAnalysis.cpp b/lib/Analysis/ModuleSummaryAnalysis.cpp
index a83412506a07..99f900ae3932 100644
--- a/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -37,7 +37,8 @@ using namespace llvm;
// Walk through the operands of a given User via worklist iteration and populate
// the set of GlobalValue references encountered. Invoked either on an
// Instruction or a GlobalVariable (which walks its initializer).
-static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
+static void findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
+ SetVector<ValueInfo> &RefEdges,
SmallPtrSet<const User *, 8> &Visited) {
SmallVector<const User *, 32> Worklist;
Worklist.push_back(CurUser);
@@ -61,7 +62,7 @@ static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
// the reference set unless it is a callee. Callees are handled
// specially by WriteFunction and are added to a separate list.
if (!(CS && CS.isCallee(&OI)))
- RefEdges.insert(GV);
+ RefEdges.insert(Index.getOrInsertValueInfo(GV));
continue;
}
Worklist.push_back(Operand);
@@ -198,7 +199,7 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
if (isa<DbgInfoIntrinsic>(I))
continue;
++NumInsts;
- findRefEdges(&I, RefEdges, Visited);
+ findRefEdges(Index, &I, RefEdges, Visited);
auto CS = ImmutableCallSite(&I);
if (!CS)
continue;
@@ -239,7 +240,9 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
// to record the call edge to the alias in that case. Eventually
// an alias summary will be created to associate the alias and
// aliasee.
- CallGraphEdges[cast<GlobalValue>(CalledValue)].updateHotness(Hotness);
+ CallGraphEdges[Index.getOrInsertValueInfo(
+ cast<GlobalValue>(CalledValue))]
+ .updateHotness(Hotness);
} else {
// Skip inline assembly calls.
if (CI && CI->isInlineAsm())
@@ -254,15 +257,16 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
ICallAnalysis.getPromotionCandidatesForInstruction(
&I, NumVals, TotalCount, NumCandidates);
for (auto &Candidate : CandidateProfileData)
- CallGraphEdges[Candidate.Value].updateHotness(
- getHotness(Candidate.Count, PSI));
+ CallGraphEdges[Index.getOrInsertValueInfo(Candidate.Value)]
+ .updateHotness(getHotness(Candidate.Count, PSI));
}
}
// Explicit add hot edges to enforce importing for designated GUIDs for
// sample PGO, to enable the same inlines as the profiled optimized binary.
for (auto &I : F.getImportGUIDs())
- CallGraphEdges[I].updateHotness(CalleeInfo::HotnessType::Hot);
+ CallGraphEdges[Index.getOrInsertValueInfo(I)].updateHotness(
+ CalleeInfo::HotnessType::Hot);
bool NonRenamableLocal = isNonRenamableLocal(F);
bool NotEligibleForImport =
@@ -288,7 +292,7 @@ computeVariableSummary(ModuleSummaryIndex &Index, const GlobalVariable &V,
DenseSet<GlobalValue::GUID> &CantBePromoted) {
SetVector<ValueInfo> RefEdges;
SmallPtrSet<const User *, 8> Visited;
- findRefEdges(&V, RefEdges, Visited);
+ findRefEdges(Index, &V, RefEdges, Visited);
bool NonRenamableLocal = isNonRenamableLocal(V);
GlobalValueSummary::GVFlags Flags(V.getLinkage(), NonRenamableLocal,
/* LiveRoot = */ false);
@@ -317,12 +321,9 @@ computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
// Set LiveRoot flag on entries matching the given value name.
static void setLiveRoot(ModuleSummaryIndex &Index, StringRef Name) {
- auto SummaryList =
- Index.findGlobalValueSummaryList(GlobalValue::getGUID(Name));
- if (SummaryList == Index.end())
- return;
- for (auto &Summary : SummaryList->second)
- Summary->setLiveRoot();
+ if (ValueInfo VI = Index.getValueInfo(GlobalValue::getGUID(Name)))
+ for (auto &Summary : VI.getSummaryList())
+ Summary->setLiveRoot();
}
ModuleSummaryIndex llvm::buildModuleSummaryIndex(
@@ -446,12 +447,16 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
}
for (auto &GlobalList : Index) {
- assert(GlobalList.second.size() == 1 &&
+ // Ignore entries for references that are undefined in the current module.
+ if (GlobalList.second.SummaryList.empty())
+ continue;
+
+ assert(GlobalList.second.SummaryList.size() == 1 &&
"Expected module's index to have one summary per GUID");
- auto &Summary = GlobalList.second[0];
+ auto &Summary = GlobalList.second.SummaryList[0];
bool AllRefsCanBeExternallyReferenced =
llvm::all_of(Summary->refs(), [&](const ValueInfo &VI) {
- return !CantBePromoted.count(VI.getValue()->getGUID());
+ return !CantBePromoted.count(VI.getGUID());
});
if (!AllRefsCanBeExternallyReferenced) {
Summary->setNotEligibleToImport();
@@ -461,9 +466,7 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
bool AllCallsCanBeExternallyReferenced = llvm::all_of(
FuncSummary->calls(), [&](const FunctionSummary::EdgeTy &Edge) {
- auto GUID = Edge.first.isGUID() ? Edge.first.getGUID()
- : Edge.first.getValue()->getGUID();
- return !CantBePromoted.count(GUID);
+ return !CantBePromoted.count(Edge.first.getGUID());
});
if (!AllCallsCanBeExternallyReferenced)
Summary->setNotEligibleToImport();
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index bd747f7c0b7a..01dca0793145 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -2970,7 +2970,7 @@ static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
else if (ABW < BBW)
A = A.zext(BBW);
- return APIntOps::GreatestCommonDivisor(A, B);
+ return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
/// Get a canonical unsigned division expression, or something simpler if
@@ -4083,6 +4083,56 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
return None;
}
+/// A helper function for createAddRecFromPHI to handle simple cases.
+///
+/// This function tries to find an AddRec expression for the simplest (yet most
+/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
+/// If it fails, createAddRecFromPHI will use a more general, but slow,
+/// technique for finding the AddRec expression.
+const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
+ Value *BEValueV,
+ Value *StartValueV) {
+ const Loop *L = LI.getLoopFor(PN->getParent());
+ assert(L && L->getHeader() == PN->getParent());
+ assert(BEValueV && StartValueV);
+
+ auto BO = MatchBinaryOp(BEValueV, DT);
+ if (!BO)
+ return nullptr;
+
+ if (BO->Opcode != Instruction::Add)
+ return nullptr;
+
+ const SCEV *Accum = nullptr;
+ if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
+ Accum = getSCEV(BO->RHS);
+ else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
+ Accum = getSCEV(BO->LHS);
+
+ if (!Accum)
+ return nullptr;
+
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
+ if (BO->IsNUW)
+ Flags = setFlags(Flags, SCEV::FlagNUW);
+ if (BO->IsNSW)
+ Flags = setFlags(Flags, SCEV::FlagNSW);
+
+ const SCEV *StartVal = getSCEV(StartValueV);
+ const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
+
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
+
+ // We can add Flags to the post-inc expression only if we
+ // know that it is *undefined behavior* for BEValueV to
+ // overflow.
+ if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
+ if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
+ (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
+
+ return PHISCEV;
+}
+
const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
const Loop *L = LI.getLoopFor(PN->getParent());
if (!L || L->getHeader() != PN->getParent())
@@ -4111,10 +4161,16 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
if (!BEValueV || !StartValueV)
return nullptr;
- // While we are analyzing this PHI node, handle its value symbolically.
- const SCEV *SymbolicName = getUnknown(PN);
assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
"PHI node already processed?");
+
+  // First, try to find AddRec expression without creating a fictitious symbolic
+ // value for PN.
+ if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
+ return S;
+
+ // Handle PHI node value symbolically.
+ const SCEV *SymbolicName = getUnknown(PN);
ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
// Using this symbolic name for the PHI, analyze the value coming around
@@ -4189,7 +4245,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
// We can add Flags to the post-inc expression only if we
- // know that it us *undefined behavior* for BEValueV to
+ // know that it is *undefined behavior* for BEValueV to
// overflow.
if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
@@ -4744,7 +4800,7 @@ ScalarEvolution::getRange(const SCEV *S,
}
}
- return setRange(AddRec, SignHint, ConservativeResult);
+ return setRange(AddRec, SignHint, std::move(ConservativeResult));
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
@@ -4775,10 +4831,10 @@ ScalarEvolution::getRange(const SCEV *S,
APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
}
- return setRange(U, SignHint, ConservativeResult);
+ return setRange(U, SignHint, std::move(ConservativeResult));
}
- return setRange(S, SignHint, ConservativeResult);
+ return setRange(S, SignHint, std::move(ConservativeResult));
}
// Given a StartRange, Step and MaxBECount for an expression compute a range of
@@ -4786,8 +4842,8 @@ ScalarEvolution::getRange(const SCEV *S,
// from StartRange and then is changed by Step up to MaxBECount times. Signed
// argument defines if we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
- ConstantRange StartRange,
- APInt MaxBECount,
+ const ConstantRange &StartRange,
+ const APInt &MaxBECount,
unsigned BitWidth, bool Signed) {
// If either Step or MaxBECount is 0, then the expression won't change, and we
// just need to return the initial range.
@@ -4826,8 +4882,8 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
// if the expression is decreasing and will be increased by Offset otherwise.
APInt StartLower = StartRange.getLower();
APInt StartUpper = StartRange.getUpper() - 1;
- APInt MovedBoundary =
- Descending ? (StartLower - Offset) : (StartUpper + Offset);
+ APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
+ : (StartUpper + std::move(Offset));
// It's possible that the new minimum/maximum value will fall into the initial
// range (due to wrap around). This means that the expression can take any
@@ -4835,21 +4891,18 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
if (StartRange.contains(MovedBoundary))
return ConstantRange(BitWidth, /* isFullSet = */ true);
- APInt NewLower, NewUpper;
- if (Descending) {
- NewLower = MovedBoundary;
- NewUpper = StartUpper;
- } else {
- NewLower = StartLower;
- NewUpper = MovedBoundary;
- }
+ APInt NewLower =
+ Descending ? std::move(MovedBoundary) : std::move(StartLower);
+ APInt NewUpper =
+ Descending ? std::move(StartUpper) : std::move(MovedBoundary);
+ NewUpper += 1;
// If we end up with full range, return a proper full range.
- if (NewLower == NewUpper + 1)
+ if (NewLower == NewUpper)
return ConstantRange(BitWidth, /* isFullSet = */ true);
// No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
- return ConstantRange(NewLower, NewUpper + 1);
+ return ConstantRange(std::move(NewLower), std::move(NewUpper));
}
ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
@@ -7323,7 +7376,6 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
const APInt &M = MC->getAPInt();
const APInt &N = NC->getAPInt();
APInt Two(BitWidth, 2);
- APInt Four(BitWidth, 4);
{
using namespace APIntOps;
@@ -7339,7 +7391,7 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
// Compute the B^2-4ac term.
APInt SqrtTerm(B);
SqrtTerm *= B;
- SqrtTerm -= Four * (A * C);
+ SqrtTerm -= 4 * (A * C);
if (SqrtTerm.isNegative()) {
// The loop is provably infinite.
@@ -8887,7 +8939,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
if (!Addend)
return false;
- APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
+ const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
// `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
// antecedent "`FoundLHS` `Pred` `FoundRHS`".
@@ -8899,7 +8951,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
// We can also compute the range of values for `LHS` that satisfy the
// consequent, "`LHS` `Pred` `RHS`":
- APInt ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
+ const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
ConstantRange SatisfyingLHSRange =
ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
@@ -8924,7 +8976,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
- return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
+ return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).slt(MaxRHS);
}
APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
@@ -8933,7 +8985,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
- return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
+ return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).ult(MaxRHS);
}
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
@@ -8950,7 +9002,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
- return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
+ return (std::move(MinValue) + std::move(MaxStrideMinusOne)).sgt(MinRHS);
}
APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
@@ -8959,7 +9011,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
- return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
+ return (std::move(MinValue) + std::move(MaxStrideMinusOne)).ugt(MinRHS);
}
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
@@ -9250,9 +9302,8 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// the upper value of the range must be the first possible exit value.
// If A is negative then the lower of the range is the last possible loop
// value. Also note that we already checked for a full range.
- APInt One(BitWidth,1);
APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
- APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
+ APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
// The exit value should be (End+A)/A.
APInt ExitVal = (End + A).udiv(A);
@@ -9268,7 +9319,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// Ensure that the previous value is in the range. This is a sanity check.
assert(Range.contains(
EvaluateConstantChrecAtConstant(this,
- ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
+ ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
"Linear scev computation is off in a bad way!");
return SE.getConstant(ExitValue);
} else if (isQuadratic()) {
@@ -9574,7 +9625,7 @@ const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
- const SCEV *ElementSize) const {
+ const SCEV *ElementSize) {
if (Terms.size() < 1 || !ElementSize)
return;
@@ -9590,7 +9641,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
});
// Remove duplicates.
- std::sort(Terms.begin(), Terms.end());
+ array_pod_sort(Terms.begin(), Terms.end());
Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
// Put larger terms first.
@@ -9598,13 +9649,11 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
return numberOfTerms(LHS) > numberOfTerms(RHS);
});
- ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
-
// Try to divide all terms by the element size. If term is not divisible by
// element size, proceed with the original term.
for (const SCEV *&Term : Terms) {
const SCEV *Q, *R;
- SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
+ SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
if (!Q->isZero())
Term = Q;
}
@@ -9613,7 +9662,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
// Remove constant factors.
for (const SCEV *T : Terms)
- if (const SCEV *NewT = removeConstantFactors(SE, T))
+ if (const SCEV *NewT = removeConstantFactors(*this, T))
NewTerms.push_back(NewT);
DEBUG({
@@ -9622,8 +9671,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
dbgs() << *T << "\n";
});
- if (NewTerms.empty() ||
- !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
+ if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
Sizes.clear();
return;
}
diff --git a/lib/Analysis/TargetLibraryInfo.cpp b/lib/Analysis/TargetLibraryInfo.cpp
index be734fa91425..848e1b4717b5 100644
--- a/lib/Analysis/TargetLibraryInfo.cpp
+++ b/lib/Analysis/TargetLibraryInfo.cpp
@@ -1176,6 +1176,10 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1) == SizeTTy && FTy.getParamType(2) == SizeTTy);
+ case LibFunc_wcslen:
+ return (NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
+ FTy.getReturnType()->isIntegerTy());
+
case LibFunc::NumLibFuncs:
break;
}
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 6ec175fc84e2..a7f3ff672aef 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -59,8 +59,8 @@ static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
cl::Hidden, cl::init(true));
-/// Returns the bitwidth of the given scalar or pointer type (if unknown returns
-/// 0). For vector types, returns the element type's bitwidth.
+/// Returns the bitwidth of the given scalar or pointer type. For vector types,
+/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
@@ -342,7 +342,6 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
@@ -351,7 +350,7 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
TrailZ = std::min(TrailZ, BitWidth);
LeadZ = std::min(LeadZ, BitWidth);
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setLowBits(TrailZ);
Known.Zero.setHighBits(LeadZ);
@@ -529,15 +528,13 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.clearAllBits();
- Known.One.setAllBits();
+ Known.setAllOnes();
return;
}
if (match(Arg, m_Not(m_Specific(V))) &&
isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.setAllBits();
- Known.One.clearAllBits();
+ Known.setAllZero();
return;
}
@@ -719,7 +716,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.One.isAllOnesValue() || RHSKnown.isNonNegative()) {
+ if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
// We know that the sign bit is zero.
Known.makeNonNegative();
}
@@ -741,7 +738,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.Zero.isAllOnesValue() || RHSKnown.isNegative()) {
+ if (RHSKnown.isZero() || RHSKnown.isNegative()) {
// We know that the sign bit is one.
Known.makeNegative();
}
@@ -776,8 +773,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
// behavior, or we might have a bug in the compiler. We can't assert/crash, so
// clear out the known bits, try to warn the user, and hope for the best.
if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
if (Q.ORE) {
auto *CxtI = const_cast<Instruction *>(Q.CxtI);
@@ -813,10 +809,8 @@ static void computeKnownBitsFromShiftOperator(
// If there is conflict between Known.Zero and Known.One, this must be an
// overflowing left shift, so the shift result is undefined. Clear Known
// bits so that other code could propagate this undef.
- if ((Known.Zero & Known.One) != 0) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if ((Known.Zero & Known.One) != 0)
+ Known.resetAll();
return;
}
@@ -826,8 +820,7 @@ static void computeKnownBitsFromShiftOperator(
// If the shift amount could be greater than or equal to the bit-width of the LHS, the
// value could be undef, so we don't know anything about it.
if ((~Known.Zero).uge(BitWidth)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
@@ -839,8 +832,7 @@ static void computeKnownBitsFromShiftOperator(
// It would be more-clearly correct to use the two temporaries for this
// calculation. Reusing the APInts here to prevent unnecessary allocations.
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
// If we know the shifter operand is nonzero, we can sometimes infer more
// known bits. However this is expensive to compute, so be lazy about it and
@@ -886,10 +878,8 @@ static void computeKnownBitsFromShiftOperator(
// return anything we'd like, but we need to make sure the sets of known bits
// stay disjoint (it should be better for some other code to actually
// propagate the undef than to pick a value here using known bits).
- if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if (Known.Zero.intersects(Known.One))
+ Known.resetAll();
}
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
@@ -924,7 +914,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
m_Value(Y))) ||
match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
m_Value(Y))))) {
- Known2.Zero.clearAllBits(); Known2.One.clearAllBits();
+ Known2.resetAll();
computeKnownBits(Y, Known2, Depth + 1, Q);
if (Known2.One.countTrailingOnes() > 0)
Known.Zero.setBit(0);
@@ -965,8 +955,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
unsigned LeadZ = Known2.Zero.countLeadingOnes();
- Known2.One.clearAllBits();
- Known2.Zero.clearAllBits();
+ Known2.resetAll();
computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
unsigned RHSUnknownLeadingOnes = Known2.One.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
@@ -1051,11 +1040,9 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
assert(SrcBitWidth && "SrcBitWidth can't be zero");
- Known.Zero = Known.Zero.zextOrTrunc(SrcBitWidth);
- Known.One = Known.One.zextOrTrunc(SrcBitWidth);
+ Known = Known.zextOrTrunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
- Known.Zero = Known.Zero.zextOrTrunc(BitWidth);
- Known.One = Known.One.zextOrTrunc(BitWidth);
+ Known = Known.zextOrTrunc(BitWidth);
// Any top bits are known to be zero.
if (BitWidth > SrcBitWidth)
Known.Zero.setBitsFrom(SrcBitWidth);
@@ -1076,13 +1063,11 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(SrcBitWidth);
- Known.One = Known.One.trunc(SrcBitWidth);
+ Known = Known.trunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
- Known.Zero = Known.Zero.sext(BitWidth);
- Known.One = Known.One.sext(BitWidth);
+ Known = Known.sext(BitWidth);
break;
}
case Instruction::Shl: {
@@ -1202,8 +1187,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
unsigned Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@@ -1504,8 +1488,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
- Known.One.clearAllBits();
- Known.Zero.setAllBits();
+ Known.setAllZero();
return;
}
// Handle a constant vector by taking the intersection of the known bits of
@@ -1532,8 +1515,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
Constant *Element = CV->getAggregateElement(i);
auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
if (!ElementCI) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
Elt = ElementCI->getValue();
@@ -1544,7 +1526,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Start out not knowing anything.
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
// We can't imply anything about undefs.
if (isa<UndefValue>(V))
@@ -1590,13 +1572,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
/// Convenience wrapper around computeKnownBits.
void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
unsigned Depth, const Query &Q) {
- unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
- if (!BitWidth) {
- KnownZero = false;
- KnownOne = false;
- return;
- }
- KnownBits Bits(BitWidth);
+ KnownBits Bits(getBitWidth(V->getType(), Q.DL));
computeKnownBits(V, Bits, Depth, Q);
KnownOne = Bits.isNegative();
KnownZero = Bits.isNonNegative();
@@ -1847,7 +1823,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
// if the lowest bit is shifted off the end.
- if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
+ if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
if (BO->hasNoUnsignedWrap())
@@ -1906,7 +1882,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// If X and Y are both negative (as signed values) then their sum is not
// zero unless both X and Y equal INT_MIN.
- if (BitWidth && XKnownNegative && YKnownNegative) {
+ if (XKnownNegative && YKnownNegative) {
KnownBits Known(BitWidth);
APInt Mask = APInt::getSignedMaxValue(BitWidth);
// The sign bit of X is set. If some other bit is set then X is not equal
@@ -1971,7 +1947,6 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
return true;
}
- if (!BitWidth) return false;
KnownBits Known(BitWidth);
computeKnownBits(V, Known, Depth, Q);
return Known.One != 0;
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 8b6f79a81b93..580261a3b5e0 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -694,15 +694,16 @@ class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
/// Used to enable on-demand parsing of the VST.
uint64_t VSTOffset = 0;
- // Map to save ValueId to GUID association that was recorded in the
+ // Map to save ValueId to ValueInfo association that was recorded in the
// ValueSymbolTable. It is used after the VST is parsed to convert
// call graph edges read from the function summary from referencing
- // callees by their ValueId to using the GUID instead, which is how
+ // callees by their ValueId to using the ValueInfo instead, which is how
// they are recorded in the summary index being built.
- // We save a second GUID which is the same as the first one, but ignoring the
- // linkage, i.e. for value other than local linkage they are identical.
- DenseMap<unsigned, std::pair<GlobalValue::GUID, GlobalValue::GUID>>
- ValueIdToCallGraphGUIDMap;
+ // We save a GUID which refers to the same global as the ValueInfo, but
+ // ignoring the linkage, i.e. for values other than local linkage they are
+ // identical.
+ DenseMap<unsigned, std::pair<ValueInfo, GlobalValue::GUID>>
+ ValueIdToValueInfoMap;
/// Map populated during module path string table parsing, from the
/// module ID to a string reference owned by the index's module
@@ -742,8 +743,8 @@ private:
Error parseEntireSummary();
Error parseModuleStringTable();
- std::pair<GlobalValue::GUID, GlobalValue::GUID>
- getGUIDFromValueId(unsigned ValueId);
+ std::pair<ValueInfo, GlobalValue::GUID>
+ getValueInfoFromValueId(unsigned ValueId);
ModulePathStringTableTy::iterator addThisModulePath();
};
@@ -4697,11 +4698,11 @@ ModuleSummaryIndexBitcodeReader::addThisModulePath() {
return TheIndex.addModulePath(ModulePath, ModuleId);
}
-std::pair<GlobalValue::GUID, GlobalValue::GUID>
-ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) {
- auto VGI = ValueIdToCallGraphGUIDMap.find(ValueId);
- assert(VGI != ValueIdToCallGraphGUIDMap.end());
- return VGI->second;
+std::pair<ValueInfo, GlobalValue::GUID>
+ModuleSummaryIndexBitcodeReader::getValueInfoFromValueId(unsigned ValueId) {
+ auto VGI = ValueIdToValueInfoMap[ValueId];
+ assert(VGI.first);
+ return VGI;
}
void ModuleSummaryIndexBitcodeReader::setValueGUID(
@@ -4716,8 +4717,8 @@ void ModuleSummaryIndexBitcodeReader::setValueGUID(
if (PrintSummaryGUIDs)
dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is "
<< ValueName << "\n";
- ValueIdToCallGraphGUIDMap[ValueID] =
- std::make_pair(ValueGUID, OriginalNameID);
+ ValueIdToValueInfoMap[ValueID] =
+ std::make_pair(TheIndex.getOrInsertValueInfo(ValueGUID), OriginalNameID);
}
// Specialized value symbol table parser used when reading module index
@@ -4795,7 +4796,8 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
GlobalValue::GUID RefGUID = Record[1];
// The "original name", which is the second value of the pair will be
 // overridden later by a FS_COMBINED_ORIGINAL_NAME in the combined index.
- ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
+ ValueIdToValueInfoMap[ValueID] =
+ std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
break;
}
}
@@ -4940,7 +4942,7 @@ ModuleSummaryIndexBitcodeReader::makeRefList(ArrayRef<uint64_t> Record) {
std::vector<ValueInfo> Ret;
Ret.reserve(Record.size());
for (uint64_t RefValueId : Record)
- Ret.push_back(getGUIDFromValueId(RefValueId).first);
+ Ret.push_back(getValueInfoFromValueId(RefValueId).first);
return Ret;
}
@@ -4950,14 +4952,14 @@ std::vector<FunctionSummary::EdgeTy> ModuleSummaryIndexBitcodeReader::makeCallLi
Ret.reserve(Record.size());
for (unsigned I = 0, E = Record.size(); I != E; ++I) {
CalleeInfo::HotnessType Hotness = CalleeInfo::HotnessType::Unknown;
- GlobalValue::GUID CalleeGUID = getGUIDFromValueId(Record[I]).first;
+ ValueInfo Callee = getValueInfoFromValueId(Record[I]).first;
if (IsOldProfileFormat) {
I += 1; // Skip old callsitecount field
if (HasProfile)
I += 1; // Skip old profilecount field
} else if (HasProfile)
Hotness = static_cast<CalleeInfo::HotnessType>(Record[++I]);
- Ret.push_back(FunctionSummary::EdgeTy{CalleeGUID, CalleeInfo{Hotness}});
+ Ret.push_back(FunctionSummary::EdgeTy{Callee, CalleeInfo{Hotness}});
}
return Ret;
}
@@ -5027,7 +5029,8 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
case bitc::FS_VALUE_GUID: { // [valueid, refguid]
uint64_t ValueID = Record[0];
GlobalValue::GUID RefGUID = Record[1];
- ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
+ ValueIdToValueInfoMap[ValueID] =
+ std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
break;
}
// FS_PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
@@ -5068,10 +5071,10 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
PendingTypeCheckedLoadVCalls.clear();
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
- auto GUID = getGUIDFromValueId(ValueID);
+ auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID);
FS->setModulePath(addThisModulePath()->first());
- FS->setOriginalName(GUID.second);
- TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
+ FS->setOriginalName(VIAndOriginalGUID.second);
+ TheIndex.addGlobalValueSummary(VIAndOriginalGUID.first, std::move(FS));
break;
}
// FS_ALIAS: [valueid, flags, valueid]
@@ -5091,14 +5094,15 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
// ownership.
AS->setModulePath(addThisModulePath()->first());
- GlobalValue::GUID AliaseeGUID = getGUIDFromValueId(AliaseeID).first;
+ GlobalValue::GUID AliaseeGUID =
+ getValueInfoFromValueId(AliaseeID).first.getGUID();
auto AliaseeInModule =
TheIndex.findSummaryInModule(AliaseeGUID, ModulePath);
if (!AliaseeInModule)
return error("Alias expects aliasee summary to be parsed");
AS->setAliasee(AliaseeInModule);
- auto GUID = getGUIDFromValueId(ValueID);
+ auto GUID = getValueInfoFromValueId(ValueID);
AS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(AS));
break;
@@ -5112,7 +5116,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
makeRefList(ArrayRef<uint64_t>(Record).slice(2));
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
FS->setModulePath(addThisModulePath()->first());
- auto GUID = getGUIDFromValueId(ValueID);
+ auto GUID = getValueInfoFromValueId(ValueID);
FS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
break;
@@ -5139,7 +5143,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
std::vector<FunctionSummary::EdgeTy> Edges = makeCallList(
ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
IsOldProfileFormat, HasProfile);
- GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
+ ValueInfo VI = getValueInfoFromValueId(ValueID).first;
auto FS = llvm::make_unique<FunctionSummary>(
Flags, InstCount, std::move(Refs), std::move(Edges),
std::move(PendingTypeTests), std::move(PendingTypeTestAssumeVCalls),
@@ -5152,9 +5156,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
LastSeenSummary = FS.get();
- LastSeenGUID = GUID;
+ LastSeenGUID = VI.getGUID();
FS->setModulePath(ModuleIdMap[ModuleId]);
- TheIndex.addGlobalValueSummary(GUID, std::move(FS));
+ TheIndex.addGlobalValueSummary(VI, std::move(FS));
break;
}
// FS_COMBINED_ALIAS: [valueid, modid, flags, valueid]
@@ -5170,16 +5174,17 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
LastSeenSummary = AS.get();
AS->setModulePath(ModuleIdMap[ModuleId]);
- auto AliaseeGUID = getGUIDFromValueId(AliaseeValueId).first;
+ auto AliaseeGUID =
+ getValueInfoFromValueId(AliaseeValueId).first.getGUID();
auto AliaseeInModule =
TheIndex.findSummaryInModule(AliaseeGUID, AS->modulePath());
if (!AliaseeInModule)
return error("Alias expects aliasee summary to be parsed");
AS->setAliasee(AliaseeInModule);
- GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
- LastSeenGUID = GUID;
- TheIndex.addGlobalValueSummary(GUID, std::move(AS));
+ ValueInfo VI = getValueInfoFromValueId(ValueID).first;
+ LastSeenGUID = VI.getGUID();
+ TheIndex.addGlobalValueSummary(VI, std::move(AS));
break;
}
// FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
@@ -5193,9 +5198,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
LastSeenSummary = FS.get();
FS->setModulePath(ModuleIdMap[ModuleId]);
- GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
- LastSeenGUID = GUID;
- TheIndex.addGlobalValueSummary(GUID, std::move(FS));
+ ValueInfo VI = getValueInfoFromValueId(ValueID).first;
+ LastSeenGUID = VI.getGUID();
+ TheIndex.addGlobalValueSummary(VI, std::move(FS));
break;
}
// FS_COMBINED_ORIGINAL_NAME: [original_name]
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index 485d9b6ac0bc..1b8d81a60201 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -156,14 +156,14 @@ public:
return;
for (const auto &GUIDSummaryLists : *Index)
// Examine all summaries for this GUID.
- for (auto &Summary : GUIDSummaryLists.second)
+ for (auto &Summary : GUIDSummaryLists.second.SummaryList)
if (auto FS = dyn_cast<FunctionSummary>(Summary.get()))
// For each call in the function summary, see if the call
// is to a GUID (which means it is for an indirect call,
// otherwise we would have a Value for it). If so, synthesize
// a value id.
for (auto &CallEdge : FS->calls())
- if (CallEdge.first.isGUID())
+ if (!CallEdge.first.getValue())
assignValueId(CallEdge.first.getGUID());
}
@@ -304,7 +304,7 @@ private:
}
// Helper to get the valueId for the type of value recorded in VI.
unsigned getValueId(ValueInfo VI) {
- if (VI.isGUID())
+ if (!VI.getValue())
return getValueId(VI.getGUID());
return VE.getValueID(VI.getValue());
}
@@ -358,7 +358,7 @@ public:
Callback(Summary);
} else {
for (auto &Summaries : Index)
- for (auto &Summary : Summaries.second)
+ for (auto &Summary : Summaries.second.SummaryList)
Callback({Summaries.first, Summary.get()});
}
}
@@ -3270,15 +3270,14 @@ void ModuleBitcodeWriter::writePerModuleFunctionSummaryRecord(
void ModuleBitcodeWriter::writeModuleLevelReferences(
const GlobalVariable &V, SmallVector<uint64_t, 64> &NameVals,
unsigned FSModRefsAbbrev) {
- auto Summaries =
- Index->findGlobalValueSummaryList(GlobalValue::getGUID(V.getName()));
- if (Summaries == Index->end()) {
+ auto VI = Index->getValueInfo(GlobalValue::getGUID(V.getName()));
+ if (!VI || VI.getSummaryList().empty()) {
// Only declarations should not have a summary (a declaration might however
// have a summary if the def was in module level asm).
assert(V.isDeclaration());
return;
}
- auto *Summary = Summaries->second.front().get();
+ auto *Summary = VI.getSummaryList()[0].get();
NameVals.push_back(VE.getValueID(&V));
GlobalVarSummary *VS = cast<GlobalVarSummary>(Summary);
NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
@@ -3367,15 +3366,14 @@ void ModuleBitcodeWriter::writePerModuleGlobalValueSummary() {
if (!F.hasName())
report_fatal_error("Unexpected anonymous function when writing summary");
- auto Summaries =
- Index->findGlobalValueSummaryList(GlobalValue::getGUID(F.getName()));
- if (Summaries == Index->end()) {
+ ValueInfo VI = Index->getValueInfo(GlobalValue::getGUID(F.getName()));
+ if (!VI || VI.getSummaryList().empty()) {
// Only declarations should not have a summary (a declaration might
// however have a summary if the def was in module level asm).
assert(F.isDeclaration());
continue;
}
- auto *Summary = Summaries->second.front().get();
+ auto *Summary = VI.getSummaryList()[0].get();
writePerModuleFunctionSummaryRecord(NameVals, Summary, VE.getValueID(&F),
FSCallsAbbrev, FSCallsProfileAbbrev, F);
}
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index b11e30c359b3..7ddb86d80bf0 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2761,37 +2761,63 @@ void AsmPrinter::emitXRayTable() {
auto PrevSection = OutStreamer->getCurrentSectionOnly();
auto Fn = MF->getFunction();
- MCSection *Section = nullptr;
+ MCSection *InstMap = nullptr;
+ MCSection *FnSledIndex = nullptr;
if (MF->getSubtarget().getTargetTriple().isOSBinFormatELF()) {
if (Fn->hasComdat()) {
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
+ InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
Fn->getComdat()->getName());
+ FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
+ Fn->getComdat()->getName());
} else {
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
+ InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC);
+ FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC);
}
} else if (MF->getSubtarget().getTargetTriple().isOSBinFormatMachO()) {
- Section = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
+ InstMap = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
SectionKind::getReadOnlyWithRel());
+ FnSledIndex = OutContext.getMachOSection("__DATA", "xray_fn_idx", 0,
+ SectionKind::getReadOnlyWithRel());
} else {
llvm_unreachable("Unsupported target");
}
// Before we switch over, we force a reference to a label inside the
- // xray_instr_map section. Since this function is always called just
- // before the function's end, we assume that this is happening after
- // the last return instruction.
-
+ // xray_instr_map and xray_fn_idx sections. Since this function is always
+ // called just before the function's end, we assume that this is happening
+ // after the last return instruction. We also use the synthetic label in the
+ // xray_instr_map as a delimiter for the range of sleds for this function in
+ // the index.
auto WordSizeBytes = MAI->getCodePointerSize();
- MCSymbol *Tmp = OutContext.createTempSymbol("xray_synthetic_", true);
+ MCSymbol *SledsStart = OutContext.createTempSymbol("xray_synthetic_", true);
+ MCSymbol *IdxRef = OutContext.createTempSymbol("xray_fn_idx_synth_", true);
OutStreamer->EmitCodeAlignment(16);
- OutStreamer->EmitSymbolValue(Tmp, WordSizeBytes, false);
- OutStreamer->SwitchSection(Section);
- OutStreamer->EmitLabel(Tmp);
+ OutStreamer->EmitSymbolValue(SledsStart, WordSizeBytes, false);
+ OutStreamer->EmitSymbolValue(IdxRef, WordSizeBytes, false);
+
+ // Now we switch to the instrumentation map section. Because this is done
+ // per-function, we are able to create an index entry that will represent the
+ // range of sleds associated with a function.
+ OutStreamer->SwitchSection(InstMap);
+ OutStreamer->EmitLabel(SledsStart);
for (const auto &Sled : Sleds)
Sled.emit(WordSizeBytes, OutStreamer.get(), CurrentFnSym);
-
+ MCSymbol *SledsEnd = OutContext.createTempSymbol("xray_synthetic_end", true);
+ OutStreamer->EmitLabel(SledsEnd);
+
+ // We then emit a single entry in the index per function. We use the symbols
+ // that bound the instrumentation map as the range for a specific function.
+ // Each entry here will be 2 * word size aligned, as we're writing down two
+ // pointers. This should work for both 32-bit and 64-bit platforms.
+ OutStreamer->SwitchSection(FnSledIndex);
+ OutStreamer->EmitCodeAlignment(2 * WordSizeBytes);
+ OutStreamer->EmitLabel(IdxRef);
+ OutStreamer->EmitSymbolValue(SledsStart, WordSizeBytes);
+ OutStreamer->EmitSymbolValue(SledsEnd, WordSizeBytes);
OutStreamer->SwitchSection(PrevSection);
Sleds.clear();
}
diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 786b11618d75..87b45c001de4 100644
--- a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -469,7 +469,7 @@ void CodeViewDebug::emitTypeInformation() {
CommentPrefix += ' ';
}
- TypeDatabase TypeDB;
+ TypeDatabase TypeDB(TypeTable.records().size());
CVTypeDumper CVTD(TypeDB);
TypeTable.ForEachRecord([&](TypeIndex Index, ArrayRef<uint8_t> Record) {
if (OS.isVerboseAsm()) {
@@ -1705,10 +1705,12 @@ TypeIndex CodeViewDebug::lowerCompleteTypeClass(const DICompositeType *Ty) {
SizeInBytes, FullName, Ty->getIdentifier());
TypeIndex ClassTI = TypeTable.writeKnownType(CR);
- StringIdRecord SIDR(TypeIndex(0x0), getFullFilepath(Ty->getFile()));
- TypeIndex SIDI = TypeTable.writeKnownType(SIDR);
- UdtSourceLineRecord USLR(ClassTI, SIDI, Ty->getLine());
- TypeTable.writeKnownType(USLR);
+ if (const auto *File = Ty->getFile()) {
+ StringIdRecord SIDR(TypeIndex(0x0), getFullFilepath(File));
+ TypeIndex SIDI = TypeTable.writeKnownType(SIDR);
+ UdtSourceLineRecord USLR(ClassTI, SIDI, Ty->getLine());
+ TypeTable.writeKnownType(USLR);
+ }
addToUDTs(Ty, ClassTI);
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index 2d01301402f0..b63d9f4a4351 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -1850,8 +1850,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
return false;
bool HasDups = false;
- SmallVector<unsigned, 4> LocalDefs;
- SmallSet<unsigned, 4> LocalDefsSet;
+ SmallVector<unsigned, 4> LocalDefs, LocalKills;
+ SmallSet<unsigned, 4> ActiveDefsSet, AllDefsSet;
MachineBasicBlock::iterator TIB = TBB->begin();
MachineBasicBlock::iterator FIB = FBB->begin();
MachineBasicBlock::iterator TIE = TBB->end();
@@ -1905,7 +1905,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
IsSafe = false;
break;
}
- } else if (!LocalDefsSet.count(Reg)) {
+ } else if (!ActiveDefsSet.count(Reg)) {
if (Defs.count(Reg)) {
// Use is defined by the instruction at the point of insertion.
IsSafe = false;
@@ -1925,18 +1925,22 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore))
break;
- // Remove kills from LocalDefsSet, these registers had short live ranges.
+ // Remove kills from ActiveDefsSet, these registers had short live ranges.
for (const MachineOperand &MO : TIB->operands()) {
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned Reg = MO.getReg();
- if (!Reg || !LocalDefsSet.count(Reg))
+ if (!Reg)
+ continue;
+ if (!AllDefsSet.count(Reg)) {
+ LocalKills.push_back(Reg);
continue;
+ }
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
- LocalDefsSet.erase(*AI);
+ ActiveDefsSet.erase(*AI);
} else {
- LocalDefsSet.erase(Reg);
+ ActiveDefsSet.erase(Reg);
}
}
@@ -1948,7 +1952,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg))
continue;
LocalDefs.push_back(Reg);
- addRegAndItsAliases(Reg, TRI, LocalDefsSet);
+ addRegAndItsAliases(Reg, TRI, ActiveDefsSet);
+ addRegAndItsAliases(Reg, TRI, AllDefsSet);
}
HasDups = true;
@@ -1963,17 +1968,22 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
FBB->erase(FBB->begin(), FIB);
// Update livein's.
- bool AddedLiveIns = false;
+ bool ChangedLiveIns = false;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Def = LocalDefs[i];
- if (LocalDefsSet.count(Def)) {
+ if (ActiveDefsSet.count(Def)) {
TBB->addLiveIn(Def);
FBB->addLiveIn(Def);
- AddedLiveIns = true;
+ ChangedLiveIns = true;
}
}
+ for (unsigned K : LocalKills) {
+ TBB->removeLiveIn(K);
+ FBB->removeLiveIn(K);
+ ChangedLiveIns = true;
+ }
- if (AddedLiveIns) {
+ if (ChangedLiveIns) {
TBB->sortUniqueLiveIns();
FBB->sortUniqueLiveIns();
}
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 75be7a55bd2a..811858f136eb 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1108,6 +1108,14 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
default:
return false;
}
+ } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
+ if (CV->getNumOperands() == 1)
+ return translate(*CV->getOperand(0), Reg);
+ SmallVector<unsigned, 4> Ops;
+ for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
+ Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
+ }
+ EntryBuilder.buildMerge(Reg, Ops);
} else
return false;
@@ -1199,9 +1207,6 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
finishPendingPhis();
- auto &TLI = *MF->getSubtarget().getTargetLowering();
- TLI.finalizeLowering(*MF);
-
// Merge the argument lowering and constants block with its single
// successor, the LLVM-IR entry block. We want the basic block to
// be maximal.
diff --git a/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index cf97c635e79a..a16e14fe2db6 100644
--- a/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -24,6 +24,7 @@
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#define DEBUG_TYPE "instruction-select"
@@ -70,8 +71,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
// An optimization remark emitter. Used to report failures.
MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
- // FIXME: freezeReservedRegs is now done in IRTranslator, but there are many
- // other MF/MFI fields we need to initialize.
+ // FIXME: There are many other MF/MFI fields we need to initialize.
#ifndef NDEBUG
// Check that our input is fully legal: we require the function to have the
@@ -184,6 +184,9 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
return false;
}
+ auto &TLI = *MF.getSubtarget().getTargetLowering();
+ TLI.finalizeLowering(MF);
+
// FIXME: Should we accurately track changes?
return true;
}
diff --git a/lib/CodeGen/GlobalISel/Legalizer.cpp b/lib/CodeGen/GlobalISel/Legalizer.cpp
index 74ed58e8d049..aec379197dfb 100644
--- a/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -176,8 +176,13 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
unsigned NumNewInsns = 0;
SmallVector<MachineInstr *, 4> WorkList;
Helper.MIRBuilder.recordInsertions([&](MachineInstr *MI) {
- ++NumNewInsns;
- WorkList.push_back(MI);
+ // Only legalize pre-isel generic instructions.
+ // The legalization process could generate target-specific pseudo
+ // instructions with generic types. Don't record them.
+ if (isPreISelGenericOpcode(MI->getOpcode())) {
+ ++NumNewInsns;
+ WorkList.push_back(MI);
+ }
});
WorkList.push_back(&*MI);
diff --git a/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index f935390a8d1b..7248f50945d0 100644
--- a/lib/CodeGen/GlobalISel/RegBankSelect.cpp
+++ b/lib/CodeGen/GlobalISel/RegBankSelect.cpp
@@ -213,21 +213,23 @@ uint64_t RegBankSelect::getRepairCost(
return UINT_MAX;
}
-RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
+const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
MachineInstr &MI, RegisterBankInfo::InstructionMappings &PossibleMappings,
SmallVectorImpl<RepairingPlacement> &RepairPts) {
assert(!PossibleMappings.empty() &&
"Do not know how to map this instruction");
- RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
+ const RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
MappingCost Cost = MappingCost::ImpossibleCost();
SmallVector<RepairingPlacement, 4> LocalRepairPts;
- for (RegisterBankInfo::InstructionMapping &CurMapping : PossibleMappings) {
- MappingCost CurCost = computeMapping(MI, CurMapping, LocalRepairPts, &Cost);
+ for (const RegisterBankInfo::InstructionMapping *CurMapping :
+ PossibleMappings) {
+ MappingCost CurCost =
+ computeMapping(MI, *CurMapping, LocalRepairPts, &Cost);
if (CurCost < Cost) {
DEBUG(dbgs() << "New best: " << CurCost << '\n');
Cost = CurCost;
- BestMapping = &CurMapping;
+ BestMapping = CurMapping;
RepairPts.clear();
for (RepairingPlacement &RepairPt : LocalRepairPts)
RepairPts.emplace_back(std::move(RepairPt));
@@ -237,7 +239,7 @@ RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
// If none of the mapping worked that means they are all impossible.
// Thus, pick the first one and set an impossible repairing point.
// It will trigger the failed isel mode.
- BestMapping = &(*PossibleMappings.begin());
+ BestMapping = *PossibleMappings.begin();
RepairPts.emplace_back(
RepairingPlacement(MI, 0, *TRI, *this, RepairingPlacement::Impossible));
} else
@@ -543,10 +545,10 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
// Remember the repairing placement for all the operands.
SmallVector<RepairingPlacement, 4> RepairPts;
- RegisterBankInfo::InstructionMapping BestMapping;
+ const RegisterBankInfo::InstructionMapping *BestMapping;
if (OptMode == RegBankSelect::Mode::Fast) {
- BestMapping = RBI->getInstrMapping(MI);
- MappingCost DefaultCost = computeMapping(MI, BestMapping, RepairPts);
+ BestMapping = &RBI->getInstrMapping(MI);
+ MappingCost DefaultCost = computeMapping(MI, *BestMapping, RepairPts);
(void)DefaultCost;
if (DefaultCost == MappingCost::ImpossibleCost())
return false;
@@ -555,16 +557,16 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
RBI->getInstrPossibleMappings(MI);
if (PossibleMappings.empty())
return false;
- BestMapping = std::move(findBestMapping(MI, PossibleMappings, RepairPts));
+ BestMapping = &findBestMapping(MI, PossibleMappings, RepairPts);
}
// Make sure the mapping is valid for MI.
- assert(BestMapping.verify(MI) && "Invalid instruction mapping");
+ assert(BestMapping->verify(MI) && "Invalid instruction mapping");
- DEBUG(dbgs() << "Best Mapping: " << BestMapping << '\n');
+ DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
// After this call, MI may not be valid anymore.
// Do not use it.
- return applyMapping(MI, BestMapping, RepairPts);
+ return applyMapping(MI, *BestMapping, RepairPts);
}
bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {
diff --git a/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
index d5ae9a6776a4..a841902feed1 100644
--- a/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
+++ b/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -45,6 +45,10 @@ STATISTIC(NumOperandsMappingsCreated,
"Number of operands mappings dynamically created");
STATISTIC(NumOperandsMappingsAccessed,
"Number of operands mappings dynamically accessed");
+STATISTIC(NumInstructionMappingsCreated,
+ "Number of instruction mappings dynamically created");
+STATISTIC(NumInstructionMappingsAccessed,
+ "Number of instruction mappings dynamically accessed");
const unsigned RegisterBankInfo::DefaultMappingID = UINT_MAX;
const unsigned RegisterBankInfo::InvalidMappingID = UINT_MAX - 1;
@@ -137,7 +141,7 @@ static bool isCopyLike(const MachineInstr &MI) {
MI.getOpcode() == TargetOpcode::REG_SEQUENCE;
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
// For copies we want to walk over the operands and try to find one
// that has a register bank since the instruction itself will not get
@@ -147,9 +151,6 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
// is important. The rest is not constrained.
unsigned NumOperandsForMapping = IsCopyLike ? 1 : MI.getNumOperands();
- RegisterBankInfo::InstructionMapping Mapping(DefaultMappingID, /*Cost*/ 1,
- /*OperandsMapping*/ nullptr,
- NumOperandsForMapping);
const MachineFunction &MF = *MI.getParent()->getParent();
const TargetSubtargetInfo &STI = MF.getSubtarget();
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
@@ -190,7 +191,7 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
if (!IsCopyLike)
// MI does not carry enough information to guess the mapping.
- return InstructionMapping();
+ return getInvalidInstructionMapping();
continue;
}
}
@@ -206,11 +207,13 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
if (IsCopyLike && !CompleteMapping)
// No way to deduce the type from what we have.
- return InstructionMapping();
+ return getInvalidInstructionMapping();
assert(CompleteMapping && "Setting an uncomplete mapping");
- Mapping.setOperandsMapping(getOperandsMapping(OperandsMapping));
- return Mapping;
+ return getInstructionMapping(
+ DefaultMappingID, /*Cost*/ 1,
+ /*OperandsMapping*/ getOperandsMapping(OperandsMapping),
+ NumOperandsForMapping);
}
/// Hashing function for PartialMapping.
@@ -320,9 +323,44 @@ const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping(
return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end());
}
-RegisterBankInfo::InstructionMapping
+static hash_code
+hashInstructionMapping(unsigned ID, unsigned Cost,
+ const RegisterBankInfo::ValueMapping *OperandsMapping,
+ unsigned NumOperands) {
+ return hash_combine(ID, Cost, OperandsMapping, NumOperands);
+}
+
+const RegisterBankInfo::InstructionMapping &
+RegisterBankInfo::getInstructionMappingImpl(
+ bool IsInvalid, unsigned ID, unsigned Cost,
+ const RegisterBankInfo::ValueMapping *OperandsMapping,
+ unsigned NumOperands) const {
+ assert(((IsInvalid && ID == InvalidMappingID && Cost == 0 &&
+ OperandsMapping == nullptr && NumOperands == 0) ||
+ !IsInvalid) &&
+ "Mismatch argument for invalid input");
+ ++NumInstructionMappingsAccessed;
+
+ hash_code Hash =
+ hashInstructionMapping(ID, Cost, OperandsMapping, NumOperands);
+ const auto &It = MapOfInstructionMappings.find(Hash);
+ if (It != MapOfInstructionMappings.end())
+ return *It->second;
+
+ ++NumInstructionMappingsCreated;
+
+ auto &InstrMapping = MapOfInstructionMappings[Hash];
+ if (IsInvalid)
+ InstrMapping = llvm::make_unique<InstructionMapping>();
+ else
+ InstrMapping = llvm::make_unique<InstructionMapping>(
+ ID, Cost, OperandsMapping, NumOperands);
+ return *InstrMapping;
+}
+
+const RegisterBankInfo::InstructionMapping &
RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
llvm_unreachable("The target must implement this");
@@ -332,14 +370,14 @@ RegisterBankInfo::InstructionMappings
RegisterBankInfo::getInstrPossibleMappings(const MachineInstr &MI) const {
InstructionMappings PossibleMappings;
// Put the default mapping first.
- PossibleMappings.push_back(getInstrMapping(MI));
+ PossibleMappings.push_back(&getInstrMapping(MI));
// Then the alternative mapping, if any.
InstructionMappings AltMappings = getInstrAlternativeMappings(MI);
- for (InstructionMapping &AltMapping : AltMappings)
- PossibleMappings.emplace_back(std::move(AltMapping));
+ for (const InstructionMapping *AltMapping : AltMappings)
+ PossibleMappings.push_back(AltMapping);
#ifndef NDEBUG
- for (const InstructionMapping &Mapping : PossibleMappings)
- assert(Mapping.verify(MI) && "Mapping is invalid");
+ for (const InstructionMapping *Mapping : PossibleMappings)
+ assert(Mapping->verify(MI) && "Mapping is invalid");
#endif
return PossibleMappings;
}
diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp
index cac22af32956..1d36ff4e1458 100644
--- a/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/lib/CodeGen/MIRParser/MIParser.cpp
@@ -12,11 +12,13 @@
//===----------------------------------------------------------------------===//
#include "MIParser.h"
+
#include "MILexer.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/AsmParser/SlotMapping.h"
+#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -134,7 +136,8 @@ public:
bool
parseBasicBlockDefinition(DenseMap<unsigned, MachineBasicBlock *> &MBBSlots);
- bool parseBasicBlock(MachineBasicBlock &MBB);
+ bool parseBasicBlock(MachineBasicBlock &MBB,
+ MachineBasicBlock *&AddFalthroughFrom);
bool parseBasicBlockLiveins(MachineBasicBlock &MBB);
bool parseBasicBlockSuccessors(MachineBasicBlock &MBB);
@@ -518,7 +521,8 @@ bool MIParser::parseBasicBlockSuccessors(MachineBasicBlock &MBB) {
return false;
}
-bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
+bool MIParser::parseBasicBlock(MachineBasicBlock &MBB,
+ MachineBasicBlock *&AddFalthroughFrom) {
// Skip the definition.
assert(Token.is(MIToken::MachineBasicBlockLabel));
lex();
@@ -538,10 +542,12 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
//
// is equivalent to
// liveins: %edi, %esi
+ bool ExplicitSuccesors = false;
while (true) {
if (Token.is(MIToken::kw_successors)) {
if (parseBasicBlockSuccessors(MBB))
return true;
+ ExplicitSuccesors = true;
} else if (Token.is(MIToken::kw_liveins)) {
if (parseBasicBlockLiveins(MBB))
return true;
@@ -557,10 +563,9 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
// Parse the instructions.
bool IsInBundle = false;
MachineInstr *PrevMI = nullptr;
- while (true) {
- if (Token.is(MIToken::MachineBasicBlockLabel) || Token.is(MIToken::Eof))
- return false;
- else if (consumeIfPresent(MIToken::Newline))
+ while (!Token.is(MIToken::MachineBasicBlockLabel) &&
+ !Token.is(MIToken::Eof)) {
+ if (consumeIfPresent(MIToken::Newline))
continue;
if (consumeIfPresent(MIToken::rbrace)) {
// The first parsing pass should verify that all closing '}' have an
@@ -592,6 +597,22 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
assert(Token.isNewlineOrEOF() && "MI is not fully parsed");
lex();
}
+
+ // Construct successor list by searching for basic block machine operands.
+ if (!ExplicitSuccesors) {
+ SmallVector<MachineBasicBlock*,4> Successors;
+ bool IsFallthrough;
+ guessSuccessors(MBB, Successors, IsFallthrough);
+ for (MachineBasicBlock *Succ : Successors)
+ MBB.addSuccessor(Succ);
+
+ if (IsFallthrough) {
+ AddFalthroughFrom = &MBB;
+ } else {
+ MBB.normalizeSuccProbs();
+ }
+ }
+
return false;
}
@@ -605,11 +626,18 @@ bool MIParser::parseBasicBlocks() {
// The first parsing pass should have verified that this token is a MBB label
// in the 'parseBasicBlockDefinitions' method.
assert(Token.is(MIToken::MachineBasicBlockLabel));
+ MachineBasicBlock *AddFalthroughFrom = nullptr;
do {
MachineBasicBlock *MBB = nullptr;
if (parseMBBReference(MBB))
return true;
- if (parseBasicBlock(*MBB))
+ if (AddFalthroughFrom) {
+ if (!AddFalthroughFrom->isSuccessor(MBB))
+ AddFalthroughFrom->addSuccessor(MBB);
+ AddFalthroughFrom->normalizeSuccProbs();
+ AddFalthroughFrom = nullptr;
+ }
+ if (parseBasicBlock(*MBB, AddFalthroughFrom))
return true;
// The method 'parseBasicBlock' should parse the whole block until the next
// block or the end of file.
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index d017b21f0a59..6f6a67d81b0f 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MIRPrinter.h"
+#include "llvm/CodeGen/MIRPrinter.h"
+
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@@ -34,6 +35,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Options.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -42,6 +44,9 @@
using namespace llvm;
+static cl::opt<bool> SimplifyMIR("simplify-mir",
+ cl::desc("Leave out unnecessary information when printing MIR"));
+
namespace {
/// This structure describes how to print out stack object references.
@@ -105,6 +110,9 @@ class MIPrinter {
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds;
const DenseMap<int, FrameIndexOperand> &StackObjectOperandMapping;
+ bool canPredictBranchProbabilities(const MachineBasicBlock &MBB) const;
+ bool canPredictSuccessors(const MachineBasicBlock &MBB) const;
+
public:
MIPrinter(raw_ostream &OS, ModuleSlotTracker &MST,
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds,
@@ -454,6 +462,63 @@ void MIRPrinter::initRegisterMaskIds(const MachineFunction &MF) {
RegisterMaskIds.insert(std::make_pair(Mask, I++));
}
+void llvm::guessSuccessors(const MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineBasicBlock*> &Result,
+ bool &IsFallthrough) {
+ SmallPtrSet<MachineBasicBlock*,8> Seen;
+
+ for (const MachineInstr &MI : MBB) {
+ if (MI.isPHI())
+ continue;
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isMBB())
+ continue;
+ MachineBasicBlock *Succ = MO.getMBB();
+ auto RP = Seen.insert(Succ);
+ if (RP.second)
+ Result.push_back(Succ);
+ }
+ }
+ MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
+ IsFallthrough = I == MBB.end() || !I->isBarrier();
+}
+
+bool
+MIPrinter::canPredictBranchProbabilities(const MachineBasicBlock &MBB) const {
+ if (MBB.succ_size() <= 1)
+ return true;
+ if (!MBB.hasSuccessorProbabilities())
+ return true;
+
+ SmallVector<BranchProbability,8> Normalized(MBB.Probs.begin(),
+ MBB.Probs.end());
+ BranchProbability::normalizeProbabilities(Normalized.begin(),
+ Normalized.end());
+ SmallVector<BranchProbability,8> Equal(Normalized.size());
+ BranchProbability::normalizeProbabilities(Equal.begin(), Equal.end());
+
+ return std::equal(Normalized.begin(), Normalized.end(), Equal.begin());
+}
+
+bool MIPrinter::canPredictSuccessors(const MachineBasicBlock &MBB) const {
+ SmallVector<MachineBasicBlock*,8> GuessedSuccs;
+ bool GuessedFallthrough;
+ guessSuccessors(MBB, GuessedSuccs, GuessedFallthrough);
+ if (GuessedFallthrough) {
+ const MachineFunction &MF = *MBB.getParent();
+ MachineFunction::const_iterator NextI = std::next(MBB.getIterator());
+ if (NextI != MF.end()) {
+ MachineBasicBlock *Next = const_cast<MachineBasicBlock*>(&*NextI);
+ if (!is_contained(GuessedSuccs, Next))
+ GuessedSuccs.push_back(Next);
+ }
+ }
+ if (GuessedSuccs.size() != MBB.succ_size())
+ return false;
+ return std::equal(MBB.succ_begin(), MBB.succ_end(), GuessedSuccs.begin());
+}
+
+
void MIPrinter::print(const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
OS << "bb." << MBB.getNumber();
@@ -492,13 +557,15 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
bool HasLineAttributes = false;
// Print the successors
- if (!MBB.succ_empty()) {
+ bool canPredictProbs = canPredictBranchProbabilities(MBB);
+ if (!MBB.succ_empty() && (!SimplifyMIR || !canPredictProbs ||
+ !canPredictSuccessors(MBB))) {
OS.indent(2) << "successors: ";
for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
if (I != MBB.succ_begin())
OS << ", ";
printMBBReference(**I);
- if (MBB.hasSuccessorProbabilities())
+ if (!SimplifyMIR || !canPredictProbs)
OS << '('
<< format("0x%08" PRIx32, MBB.getSuccProbability(I).getNumerator())
<< ')';
diff --git a/lib/CodeGen/MIRPrintingPass.cpp b/lib/CodeGen/MIRPrintingPass.cpp
index c690bcfad567..671cf1eddc2d 100644
--- a/lib/CodeGen/MIRPrintingPass.cpp
+++ b/lib/CodeGen/MIRPrintingPass.cpp
@@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "MIRPrinter.h"
+#include "llvm/CodeGen/MIRPrinter.h"
+
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
diff --git a/lib/CodeGen/MachineFrameInfo.cpp b/lib/CodeGen/MachineFrameInfo.cpp
index 7de8434df806..73d778ff3023 100644
--- a/lib/CodeGen/MachineFrameInfo.cpp
+++ b/lib/CodeGen/MachineFrameInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
@@ -175,6 +176,31 @@ unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
return (unsigned)Offset;
}
+void MachineFrameInfo::computeMaxCallFrameSize(const MachineFunction &MF) {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
+ unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
+ assert(FrameSetupOpcode != ~0u && FrameDestroyOpcode != ~0u &&
+ "Can only compute MaxCallFrameSize if Setup/Destroy opcode are known");
+
+ MaxCallFrameSize = 0;
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == FrameSetupOpcode || Opcode == FrameDestroyOpcode) {
+ unsigned Size = TII.getFrameSize(MI);
+ MaxCallFrameSize = std::max(MaxCallFrameSize, Size);
+ AdjustsStack = true;
+ } else if (MI.isInlineAsm()) {
+ // Some inline asm's need a stack frame, as indicated by operand 1.
+ unsigned ExtraInfo = MI.getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
+ AdjustsStack = true;
+ }
+ }
+ }
+}
+
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
if (Objects.empty()) return;
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index 84bd670105e1..bfb2cde030dc 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -188,8 +188,9 @@ namespace {
return Reg < regsReserved.size() && regsReserved.test(Reg);
}
- bool isAllocatable(unsigned Reg) {
- return Reg < TRI->getNumRegs() && MRI->isAllocatable(Reg);
+ bool isAllocatable(unsigned Reg) const {
+ return Reg < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
+ !regsReserved.test(Reg);
}
// Analysis information if available
@@ -526,7 +527,8 @@ void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
void MachineVerifier::visitMachineFunctionBefore() {
lastIndex = SlotIndex();
- regsReserved = MRI->getReservedRegs();
+ regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
+ : TRI->getReservedRegs(*MF);
if (!MF->empty())
markReachable(&MF->front());
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index 549f07ecd9ce..d2afeae9e70b 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -277,6 +277,9 @@ void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
AdjustsStack = true;
}
+ assert(!MFI.isMaxCallFrameSizeComputed() ||
+ (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
+ MFI.adjustsStack() == AdjustsStack));
MFI.setAdjustsStack(AdjustsStack);
MFI.setMaxCallFrameSize(MaxCallFrameSize);
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 03698ac862af..c77046fdfaf5 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6688,6 +6688,9 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
if (isAbs) {
EVT VT = LHS.getValueType();
+ if (TLI.isOperationLegalOrCustom(ISD::ABS, VT))
+ return DAG.getNode(ISD::ABS, DL, VT, LHS);
+
SDValue Shift = DAG.getNode(
ISD::SRA, DL, VT, LHS,
DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
@@ -9469,6 +9472,14 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
return SDValue();
}
+static bool isFMulNegTwo(SDValue &N) {
+ if (N.getOpcode() != ISD::FMUL)
+ return false;
+ if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N.getOperand(1)))
+ return CFP->isExactlyValue(-2.0);
+ return false;
+}
+
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -9507,6 +9518,16 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
return DAG.getNode(ISD::FSUB, DL, VT, N1,
GetNegatedExpression(N0, DAG, LegalOperations), Flags);
+ // fold (fadd A, (fmul B, -2.0)) -> (fsub A, (fadd B, B))
+ // fold (fadd (fmul B, -2.0), A) -> (fsub A, (fadd B, B))
+ if ((isFMulNegTwo(N0) && N0.hasOneUse()) ||
+ (isFMulNegTwo(N1) && N1.hasOneUse())) {
+ bool N1IsFMul = isFMulNegTwo(N1);
+ SDValue AddOp = N1IsFMul ? N1.getOperand(0) : N0.getOperand(0);
+ SDValue Add = DAG.getNode(ISD::FADD, DL, VT, AddOp, AddOp, Flags);
+ return DAG.getNode(ISD::FSUB, DL, VT, N1IsFMul ? N0 : N1, Add, Flags);
+ }
+
// FIXME: Auto-upgrade the target/function-level option.
if (Options.NoSignedZerosFPMath || N->getFlags().hasNoSignedZeros()) {
// fold (fadd A, 0) -> A
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 6fb26fc3b73d..8c98e3740f6d 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -861,6 +861,25 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
return true;
}
+bool FastISel::selectXRayCustomEvent(const CallInst *I) {
+ const auto &Triple = TM.getTargetTriple();
+ if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
+ return true; // don't do anything to this instruction.
+ SmallVector<MachineOperand, 8> Ops;
+ Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
+ /*IsDef=*/false));
+ Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
+ /*IsDef=*/false));
+ MachineInstrBuilder MIB =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
+ for (auto &MO : Ops)
+ MIB.add(MO);
+ // Insert the Patchable Event Call instruction, that gets lowered properly.
+ return true;
+}
+
+
/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
@@ -1252,6 +1271,9 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
return selectPatchpoint(II);
+
+ case Intrinsic::xray_customevent:
+ return selectXRayCustomEvent(II);
}
return fastLowerIntrinsicCall(II);
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index a0135dc40b87..cdf4d3a8b4e5 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -402,8 +402,7 @@ FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
if (BitWidth > LOI->Known.getBitWidth()) {
LOI->NumSignBits = 1;
- LOI->Known.Zero = LOI->Known.Zero.zextOrTrunc(BitWidth);
- LOI->Known.One = LOI->Known.One.zextOrTrunc(BitWidth);
+ LOI->Known = LOI->Known.zextOrTrunc(BitWidth);
}
return LOI;
diff --git a/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
index a1d70ab6f036..a21b4c733254 100644
--- a/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
+++ b/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp
@@ -67,12 +67,11 @@ ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
unsigned
ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
unsigned NumberDeps = 0;
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl())
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isCtrl())
continue;
- SUnit *PredSU = I->getSUnit();
+ SUnit *PredSU = Pred.getSUnit();
const SDNode *ScegN = PredSU->getNode();
if (!ScegN)
@@ -105,12 +104,11 @@ ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
unsigned RCId) {
unsigned NumberDeps = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isCtrl())
+ for (const SDep &Succ : SU->Succs) {
+ if (Succ.isCtrl())
continue;
- SUnit *SuccSU = I->getSUnit();
+ SUnit *SuccSU = Succ.getSUnit();
const SDNode *ScegN = SuccSU->getNode();
if (!ScegN)
continue;
@@ -142,9 +140,8 @@ unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
static unsigned numberCtrlDepsInSU(SUnit *SU) {
unsigned NumberDeps = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- if (I->isCtrl())
+ for (const SDep &Succ : SU->Succs)
+ if (Succ.isCtrl())
NumberDeps++;
return NumberDeps;
@@ -152,9 +149,8 @@ static unsigned numberCtrlDepsInSU(SUnit *SU) {
static unsigned numberCtrlPredInSU(SUnit *SU) {
unsigned NumberDeps = 0;
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I)
- if (I->isCtrl())
+ for (SDep &Pred : SU->Preds)
+ if (Pred.isCtrl())
NumberDeps++;
return NumberDeps;
@@ -212,15 +208,14 @@ bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
SUnit *OnlyAvailablePred = nullptr;
- for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- SUnit &Pred = *I->getSUnit();
- if (!Pred.isScheduled) {
+ for (const SDep &Pred : SU->Preds) {
+ SUnit &PredSU = *Pred.getSUnit();
+ if (!PredSU.isScheduled) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
- if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
+ if (OnlyAvailablePred && OnlyAvailablePred != &PredSU)
return nullptr;
- OnlyAvailablePred = &Pred;
+ OnlyAvailablePred = &PredSU;
}
}
return OnlyAvailablePred;
@@ -230,9 +225,8 @@ void ResourcePriorityQueue::push(SUnit *SU) {
// Look at all of the successors of this node. Count the number of nodes that
// this node is the sole unscheduled node for.
unsigned NumNodesBlocking = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- if (getSingleUnscheduledPred(I->getSUnit()) == SU)
+ for (const SDep &Succ : SU->Succs)
+ if (getSingleUnscheduledPred(Succ.getSUnit()) == SU)
++NumNodesBlocking;
NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
@@ -269,14 +263,13 @@ bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
// Now see if there are no other dependencies
// to instructions already in the packet.
for (unsigned i = 0, e = Packet.size(); i != e; ++i)
- for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
- E = Packet[i]->Succs.end(); I != E; ++I) {
+ for (const SDep &Succ : Packet[i]->Succs) {
// Since we do not add pseudos to packets, might as well
// ignore order deps.
- if (I->isCtrl())
+ if (Succ.isCtrl())
continue;
- if (I->getSUnit() == SU)
+ if (Succ.getSUnit() == SU)
return false;
}
@@ -499,11 +492,10 @@ void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
}
}
}
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isCtrl() || (Pred.getSUnit()->NumRegDefsLeft == 0))
continue;
- --I->getSUnit()->NumRegDefsLeft;
+ --Pred.getSUnit()->NumRegDefsLeft;
}
}
@@ -515,10 +507,9 @@ void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
// number of live ranges. All others, increase it.
unsigned NumberNonControlDeps = 0;
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- adjustPriorityOfUnscheduledPreds(I->getSUnit());
- if (!I->isCtrl())
+ for (const SDep &Succ : SU->Succs) {
+ adjustPriorityOfUnscheduledPreds(Succ.getSUnit());
+ if (!Succ.isCtrl())
NumberNonControlDeps++;
}
@@ -595,8 +586,7 @@ SUnit *ResourcePriorityQueue::pop() {
std::vector<SUnit *>::iterator Best = Queue.begin();
if (!DisableDFASched) {
int BestCost = SUSchedulingCost(*Best);
- for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
- E = Queue.end(); I != E; ++I) {
+ for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I) {
if (SUSchedulingCost(*I) > BestCost) {
BestCost = SUSchedulingCost(*I);
@@ -606,8 +596,7 @@ SUnit *ResourcePriorityQueue::pop() {
}
// Use default TD scheduling mechanism.
else {
- for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
- E = Queue.end(); I != E; ++I)
+ for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
}
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index 62e7733ecd2b..d80a281279b6 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -160,18 +160,17 @@ void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
// Bottom up: release predecessors
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- ReleasePred(SU, &*I);
- if (I->isAssignedRegDep()) {
+ for (SDep &Pred : SU->Preds) {
+ ReleasePred(SU, &Pred);
+ if (Pred.isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
- if (!LiveRegDefs[I->getReg()]) {
+ if (!LiveRegDefs[Pred.getReg()]) {
++NumLiveRegs;
- LiveRegDefs[I->getReg()] = I->getSUnit();
- LiveRegCycles[I->getReg()] = CurCycle;
+ LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
+ LiveRegCycles[Pred.getReg()] = CurCycle;
}
}
}
@@ -191,16 +190,15 @@ void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isAssignedRegDep()) {
- if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isAssignedRegDep()) {
+ if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
- assert(LiveRegDefs[I->getReg()] == SU &&
+ assert(LiveRegDefs[Succ.getReg()] == SU &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = nullptr;
- LiveRegCycles[I->getReg()] = 0;
+ LiveRegDefs[Succ.getReg()] = nullptr;
+ LiveRegCycles[Succ.getReg()] = 0;
}
}
}
@@ -282,22 +280,20 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
SmallVector<SDep, 4> LoadPreds;
SmallVector<SDep, 4> NodePreds;
SmallVector<SDep, 4> NodeSuccs;
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl())
- ChainPred = *I;
- else if (I->getSUnit()->getNode() &&
- I->getSUnit()->getNode()->isOperandOf(LoadNode))
- LoadPreds.push_back(*I);
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isCtrl())
+ ChainPred = Pred;
+ else if (Pred.getSUnit()->getNode() &&
+ Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
+ LoadPreds.push_back(Pred);
else
- NodePreds.push_back(*I);
+ NodePreds.push_back(Pred);
}
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isCtrl())
- ChainSuccs.push_back(*I);
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isCtrl())
+ ChainSuccs.push_back(Succ);
else
- NodeSuccs.push_back(*I);
+ NodeSuccs.push_back(Succ);
}
if (ChainPred.getSUnit()) {
@@ -354,21 +350,19 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
NewSU = Clone(SU);
// New SUnit has the exact same predecessors.
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I)
- if (!I->isArtificial())
- AddPred(NewSU, *I);
+ for (SDep &Pred : SU->Preds)
+ if (!Pred.isArtificial())
+ AddPred(NewSU, Pred);
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isArtificial())
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isArtificial())
continue;
- SUnit *SuccSU = I->getSUnit();
+ SUnit *SuccSU = Succ.getSUnit();
if (SuccSU->isScheduled) {
- SDep D = *I;
+ SDep D = Succ;
D.setSUnit(NewSU);
AddPred(SuccSU, D);
D.setSUnit(SU);
@@ -399,16 +393,15 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isArtificial())
+ for (SDep &Succ : SU->Succs) {
+ if (Succ.isArtificial())
continue;
- SUnit *SuccSU = I->getSUnit();
+ SUnit *SuccSU = Succ.getSUnit();
if (SuccSU->isScheduled) {
- SDep D = *I;
+ SDep D = Succ;
D.setSUnit(CopyToSU);
AddPred(SuccSU, D);
- DelDeps.push_back(std::make_pair(SuccSU, *I));
+ DelDeps.push_back(std::make_pair(SuccSU, Succ));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
@@ -479,10 +472,9 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
SmallSet<unsigned, 4> RegAdded;
// If this node would clobber any "live" register, then it's not ready.
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isAssignedRegDep()) {
- CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isAssignedRegDep()) {
+ CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
RegAdded, LRegs, TRI);
}
}
@@ -755,9 +747,8 @@ void ScheduleDAGLinearize::Schedule() {
// Glue user must be scheduled together with the glue operand. So other
// users of the glue operand must be treated as its users.
SDNode *ImmGUser = Glue->getGluedUser();
- for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
- ui != ue; ++ui)
- if (*ui == ImmGUser)
+ for (const SDNode *U : Glue->uses())
+ if (U == ImmGUser)
--Degree;
GUser->setNodeId(UDegree + Degree);
Glue->setNodeId(1);
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 69b76fbe57d2..4f4025d8ae6a 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -520,21 +520,20 @@ FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
// Bottom up: release predecessors
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- ReleasePred(SU, &*I);
- if (I->isAssignedRegDep()) {
+ for (SDep &Pred : SU->Preds) {
+ ReleasePred(SU, &Pred);
+ if (Pred.isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
- SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
- assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
+ SUnit *RegDef = LiveRegDefs[Pred.getReg()]; (void)RegDef;
+ assert((!RegDef || RegDef == SU || RegDef == Pred.getSUnit()) &&
"interference on register dependence");
- LiveRegDefs[I->getReg()] = I->getSUnit();
- if (!LiveRegGens[I->getReg()]) {
+ LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
+ if (!LiveRegGens[Pred.getReg()]) {
++NumLiveRegs;
- LiveRegGens[I->getReg()] = SU;
+ LiveRegGens[Pred.getReg()] = SU;
}
}
}
@@ -733,15 +732,14 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
ReleasePredecessors(SU);
// Release all the implicit physical register defs that are live.
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- // LiveRegDegs[I->getReg()] != SU when SU is a two-address node.
- if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
+ for (SDep &Succ : SU->Succs) {
+ // LiveRegDegs[Succ.getReg()] != SU when SU is a two-address node.
+ if (Succ.isAssignedRegDep() && LiveRegDefs[Succ.getReg()] == SU) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = nullptr;
- LiveRegGens[I->getReg()] = nullptr;
- releaseInterferences(I->getReg());
+ LiveRegDefs[Succ.getReg()] = nullptr;
+ LiveRegGens[Succ.getReg()] = nullptr;
+ releaseInterferences(Succ.getReg());
}
}
// Release the special call resource dependence, if this is the beginning
@@ -802,17 +800,16 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
DEBUG(SU->dump(this));
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- CapturePred(&*I);
- if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
+ for (SDep &Pred : SU->Preds) {
+ CapturePred(&Pred);
+ if (Pred.isAssignedRegDep() && SU == LiveRegGens[Pred.getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
- assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
+ assert(LiveRegDefs[Pred.getReg()] == Pred.getSUnit() &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->getReg()] = nullptr;
- LiveRegGens[I->getReg()] = nullptr;
- releaseInterferences(I->getReg());
+ LiveRegDefs[Pred.getReg()] = nullptr;
+ LiveRegGens[Pred.getReg()] = nullptr;
+ releaseInterferences(Pred.getReg());
}
}
@@ -895,7 +892,7 @@ void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
unsigned HazardCycle = (*I)->getHeight();
- for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
+ for (auto E = Sequence.end(); I != E; ++I) {
SUnit *SU = *I;
for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
HazardRec->RecedeCycle();
@@ -1261,10 +1258,9 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
//
// If SU is the currently live definition of the same register that it uses,
// then we are free to schedule it.
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
- CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs.get(),
+ for (SDep &Pred : SU->Preds) {
+ if (Pred.isAssignedRegDep() && LiveRegDefs[Pred.getReg()] != SU)
+ CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs.get(),
RegAdded, LRegs, TRI);
}
@@ -1743,8 +1739,7 @@ protected:
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
std::vector<SUnit *>::iterator Best = Q.begin();
- for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
- E = Q.end(); I != E; ++I)
+ for (auto I = std::next(Q.begin()), E = Q.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9d949a2bbfa6..d605a1dc1c20 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2017,8 +2017,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (SrcOp.getValueSizeInBits() != BitWidth) {
assert(SrcOp.getValueSizeInBits() > BitWidth &&
"Expected BUILD_VECTOR implicit truncation");
- Known2.One = Known2.One.trunc(BitWidth);
- Known2.Zero = Known2.Zero.trunc(BitWidth);
+ Known2 = Known2.trunc(BitWidth);
}
// Known bits are the values that are shared by every demanded element.
@@ -2045,8 +2044,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (M < 0) {
// For UNDEF elements, we don't know anything about the common state of
// the shuffle result.
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
DemandedLHS.clearAllBits();
DemandedRHS.clearAllBits();
break;
@@ -2219,14 +2217,13 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
Known2.Zero.countLeadingOnes(),
BitWidth) - BitWidth;
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
break;
@@ -2377,7 +2374,10 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
break;
}
case ISD::CTPOP: {
- Known.Zero.setBitsFrom(Log2_32(BitWidth)+1);
+ computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
+ // If we know some of the bits are zero, they can't be one.
+ unsigned PossibleOnes = BitWidth - Known2.Zero.countPopulation();
+ Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
break;
}
case ISD::LOAD: {
@@ -2396,24 +2396,20 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
case ISD::ZERO_EXTEND_VECTOR_INREG: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known,
DemandedElts.zext(InVT.getVectorNumElements()),
Depth + 1);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBits);
break;
}
case ISD::ZERO_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBits);
break;
}
@@ -2422,34 +2418,28 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
// If the sign bit is known to be zero or one, then sext will extend
// it to the top bits, else it will just zext.
- Known.Zero = Known.Zero.sext(BitWidth);
- Known.One = Known.One.sext(BitWidth);
+ Known = Known.sext(BitWidth);
break;
}
case ISD::ANY_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.trunc(InBits);
- Known.One = Known.One.trunc(InBits);
+ Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, Depth+1);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
- Known.Zero = Known.Zero.zext(InBits);
- Known.One = Known.One.zext(InBits);
+ Known = Known.zext(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
break;
}
case ISD::AssertZext: {
@@ -2606,8 +2596,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
uint32_t Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@@ -2621,8 +2610,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);
// Remove high part of known bit mask
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
break;
}
case ISD::EXTRACT_VECTOR_ELT: {
@@ -2634,10 +2622,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
const unsigned NumSrcElts = VecVT.getVectorNumElements();
// If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
// anything about the extended bits.
- if (BitWidth > EltBitWidth) {
- Known.Zero = Known.Zero.trunc(EltBitWidth);
- Known.One = Known.One.trunc(EltBitWidth);
- }
+ if (BitWidth > EltBitWidth)
+ Known = Known.trunc(EltBitWidth);
ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
// If we know the element index, just demand that vector element.
@@ -2648,10 +2634,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
// Unknown element index, so ignore DemandedElts and demand them all.
computeKnownBits(InVec, Known, Depth + 1);
}
- if (BitWidth > EltBitWidth) {
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
- }
+ if (BitWidth > EltBitWidth)
+ Known = Known.zext(BitWidth);
break;
}
case ISD::INSERT_VECTOR_ELT: {
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ba9e11798f15..50313e2da884 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4992,45 +4992,33 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
SDNodeOrder);
DAG.AddDbgValue(SDV, nullptr, false);
- } else {
- // Do not use getValue() in here; we don't want to generate code at
- // this point if it hasn't been done yet.
- SDValue N = NodeMap[V];
- if (!N.getNode() && isa<Argument>(V))
- // Check unused arguments map.
- N = UnusedArgNodeMap[V];
- if (N.getNode()) {
- if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset,
- false, N)) {
- SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
- DAG.AddDbgValue(SDV, N.getNode(), false);
- }
- } else if (!V->use_empty() ) {
- // Do not call getValue(V) yet, as we don't want to generate code.
- // Remember it for later.
- DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
- DanglingDebugInfoMap[V] = DDI;
- } else {
- // We may expand this to cover more cases. One case where we have no
- // data available is an unreferenced parameter.
- DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
- }
+ return nullptr;
}
- // Build a debug info table entry.
- if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
- V = BCI->getOperand(0);
- const AllocaInst *AI = dyn_cast<AllocaInst>(V);
- // Don't handle byval struct arguments or VLAs, for example.
- if (!AI) {
- DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
- DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
+ // Do not use getValue() in here; we don't want to generate code at
+ // this point if it hasn't been done yet.
+ SDValue N = NodeMap[V];
+ if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
+ N = UnusedArgNodeMap[V];
+ if (N.getNode()) {
+ if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset, false,
+ N))
+ return nullptr;
+ SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, N.getNode(), false);
return nullptr;
}
- DenseMap<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
- if (SI == FuncInfo.StaticAllocaMap.end())
- return nullptr; // VLAs.
+
+ if (!V->use_empty() ) {
+ // Do not call getValue(V) yet, as we don't want to generate code.
+ // Remember it for later.
+ DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
+ DanglingDebugInfoMap[V] = DDI;
+ return nullptr;
+ }
+
+ DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
+ DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
return nullptr;
}
@@ -5715,7 +5703,37 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
setValue(&I, N);
return nullptr;
}
+ case Intrinsic::xray_customevent: {
+ // Here we want to make sure that the intrinsic behaves as if it has a
+ // specific calling convention, and only for x86_64.
+ // FIXME: Support other platforms later.
+ const auto &Triple = DAG.getTarget().getTargetTriple();
+ if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
+ return nullptr;
+ SDLoc DL = getCurSDLoc();
+ SmallVector<SDValue, 8> Ops;
+
+ // We want to say that we always want the arguments in registers.
+ SDValue LogEntryVal = getValue(I.getArgOperand(0));
+ SDValue StrSizeVal = getValue(I.getArgOperand(1));
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue Chain = getRoot();
+ Ops.push_back(LogEntryVal);
+ Ops.push_back(StrSizeVal);
+ Ops.push_back(Chain);
+
+ // We need to enforce the calling convention for the callsite, so that
+ // argument ordering is enforced correctly, and that register allocation can
+ // see that some registers may be assumed clobbered and have to preserve
+ // them across calls to the intrinsic.
+ MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
+ DL, NodeTys, Ops);
+ SDValue patchableNode = SDValue(MN, 0);
+ DAG.setRoot(patchableNode);
+ setValue(&I, patchableNode);
+ return nullptr;
+ }
case Intrinsic::experimental_deoptimize:
LowerDeoptimizeCall(&I);
return nullptr;
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 2d39ecd9779b..23f597db140c 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -561,8 +561,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (Known2.One.getBitWidth() != BitWidth) {
assert(Known2.getBitWidth() > BitWidth &&
"Expected BUILD_VECTOR implicit truncation");
- Known2.One = Known2.One.trunc(BitWidth);
- Known2.Zero = Known2.Zero.trunc(BitWidth);
+ Known2 = Known2.trunc(BitWidth);
}
// Known bits are the values that are shared by every element.
@@ -659,7 +658,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// Output known-1 are known to be set if set in either the LHS | RHS.
Known.One |= Known2.One;
break;
- case ISD::XOR:
+ case ISD::XOR: {
if (SimplifyDemandedBits(Op.getOperand(1), NewMask, Known, TLO, Depth+1))
return true;
assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
@@ -704,28 +703,24 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
}
}
- // If the RHS is a constant, see if we can simplify it.
- // for XOR, we prefer to force bits to 1 if they will make a -1.
- // If we can't force bits, try to shrink the constant.
- if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
- APInt Expanded = C->getAPIntValue() | (~NewMask);
- // If we can expand it to have all bits set, do it.
- if (Expanded.isAllOnesValue()) {
- if (Expanded != C->getAPIntValue()) {
- EVT VT = Op.getValueType();
- SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0),
- TLO.DAG.getConstant(Expanded, dl, VT));
- return TLO.CombineTo(Op, New);
- }
- // If it already has all the bits set, nothing to change
- // but don't shrink either!
- } else if (ShrinkDemandedConstant(Op, NewMask, TLO)) {
- return true;
+ // If the RHS is a constant, see if we can change it. Don't alter a -1
+ // constant because that's a 'not' op, and that is better for combining and
+ // codegen.
+ ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1));
+ if (C && !C->isAllOnesValue()) {
+ if (NewMask.isSubsetOf(C->getAPIntValue())) {
+ // We're flipping all demanded bits. Flip the undemanded bits too.
+ SDValue New = TLO.DAG.getNOT(dl, Op.getOperand(0), Op.getValueType());
+ return TLO.CombineTo(Op, New);
}
+ // If we can't turn this into a 'not', try to shrink the constant.
+ if (ShrinkDemandedConstant(Op, NewMask, TLO))
+ return true;
}
Known = std::move(KnownOut);
break;
+ }
case ISD::SELECT:
if (SimplifyDemandedBits(Op.getOperand(2), NewMask, Known, TLO, Depth+1))
return true;
@@ -1091,8 +1086,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), InMask, Known, TLO, Depth+1))
return true;
assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero |= NewBits;
break;
}
@@ -1118,8 +1112,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, Known, TLO,
Depth+1))
return true;
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
// If the sign bit is known zero, convert this to a zero extend.
if (Known.Zero.intersects(InSignBit))
@@ -1143,8 +1136,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (SimplifyDemandedBits(Op.getOperand(0), InMask, Known, TLO, Depth+1))
return true;
assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
@@ -1154,8 +1146,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
APInt TruncMask = NewMask.zext(OperandBitWidth);
if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, Known, TLO, Depth+1))
return true;
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
// If the input is only used by this truncate, see if we can shrink it based
// on the known demanded bits.
@@ -1312,7 +1303,7 @@ void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
}
/// This method can be implemented by targets that want to expose additional
diff --git a/lib/CodeGen/XRayInstrumentation.cpp b/lib/CodeGen/XRayInstrumentation.cpp
index 7d2848bdc13b..2df3602733f3 100644
--- a/lib/CodeGen/XRayInstrumentation.cpp
+++ b/lib/CodeGen/XRayInstrumentation.cpp
@@ -18,6 +18,8 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -33,6 +35,14 @@ struct XRayInstrumentation : public MachineFunctionPass {
initializeXRayInstrumentationPass(*PassRegistry::getPassRegistry());
}
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ AU.addPreserved<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
bool runOnMachineFunction(MachineFunction &MF) override;
private:
@@ -43,7 +53,7 @@ private:
// This is the approach to go on CPUs which have a single RET instruction,
// like x86/x86_64.
void replaceRetWithPatchableRet(MachineFunction &MF,
- const TargetInstrInfo *TII);
+ const TargetInstrInfo *TII);
// Prepend the original return instruction with the exit sled code ("patchable
// function exit" pseudo-instruction), preserving the original return
@@ -54,13 +64,12 @@ private:
// have to call the trampoline and return from it to the original return
// instruction of the function being instrumented.
void prependRetWithPatchableExit(MachineFunction &MF,
- const TargetInstrInfo *TII);
+ const TargetInstrInfo *TII);
};
} // anonymous namespace
-void XRayInstrumentation::replaceRetWithPatchableRet(MachineFunction &MF,
- const TargetInstrInfo *TII)
-{
+void XRayInstrumentation::replaceRetWithPatchableRet(
+ MachineFunction &MF, const TargetInstrInfo *TII) {
// We look for *all* terminators and returns, then replace those with
// PATCHABLE_RET instructions.
SmallVector<MachineInstr *, 4> Terminators;
@@ -91,9 +100,8 @@ void XRayInstrumentation::replaceRetWithPatchableRet(MachineFunction &MF,
I->eraseFromParent();
}
-void XRayInstrumentation::prependRetWithPatchableExit(MachineFunction &MF,
- const TargetInstrInfo *TII)
-{
+void XRayInstrumentation::prependRetWithPatchableExit(
+ MachineFunction &MF, const TargetInstrInfo *TII) {
for (auto &MBB : MF) {
for (auto &T : MBB.terminators()) {
unsigned Opc = 0;
@@ -106,7 +114,7 @@ void XRayInstrumentation::prependRetWithPatchableExit(MachineFunction &MF,
if (Opc != 0) {
// Prepend the return instruction with PATCHABLE_FUNCTION_EXIT or
// PATCHABLE_TAIL_CALL .
- BuildMI(MBB, T, T.getDebugLoc(),TII->get(Opc));
+ BuildMI(MBB, T, T.getDebugLoc(), TII->get(Opc));
}
}
}
@@ -125,8 +133,13 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
return false; // XRay threshold attribute not found.
if (Attr.getValueAsString().getAsInteger(10, XRayThreshold))
return false; // Invalid value for threshold.
- if (F.size() < XRayThreshold)
- return false; // Function is too small.
+
+ // Check if we have a loop.
+ // FIXME: Maybe make this smarter, and see whether the loops are dependent
+ // on inputs or side-effects?
+ MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ if (MLI.empty() && F.size() < XRayThreshold)
+ return false; // Function is too small and has no loops.
}
// We look for the first non-empty MachineBasicBlock, so that we can insert
@@ -142,12 +155,10 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
if (!MF.getSubtarget().isXRaySupported()) {
FirstMI.emitError("An attempt to perform XRay instrumentation for an"
- " unsupported target.");
+ " unsupported target.");
return false;
}
- // FIXME: Do the loop triviality analysis here or in an earlier pass.
-
// First, insert an PATCHABLE_FUNCTION_ENTER as the first instruction of the
// MachineFunction.
BuildMI(FirstMBB, FirstMI, FirstMI.getDebugLoc(),
@@ -176,5 +187,8 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
char XRayInstrumentation::ID = 0;
char &llvm::XRayInstrumentationID = XRayInstrumentation::ID;
-INITIALIZE_PASS(XRayInstrumentation, "xray-instrumentation", "Insert XRay ops",
- false, false)
+INITIALIZE_PASS_BEGIN(XRayInstrumentation, "xray-instrumentation",
+ "Insert XRay ops", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(XRayInstrumentation, "xray-instrumentation",
+ "Insert XRay ops", false, false)
diff --git a/lib/DebugInfo/CodeView/TypeDatabase.cpp b/lib/DebugInfo/CodeView/TypeDatabase.cpp
index efaba4646ffe..5b8841041f88 100644
--- a/lib/DebugInfo/CodeView/TypeDatabase.cpp
+++ b/lib/DebugInfo/CodeView/TypeDatabase.cpp
@@ -65,6 +65,11 @@ static const SimpleTypeEntry SimpleTypeNames[] = {
{"__bool64*", SimpleTypeKind::Boolean64},
};
+TypeDatabase::TypeDatabase(uint32_t ExpectedSize) : TypeNameStorage(Allocator) {
+ CVUDTNames.reserve(ExpectedSize);
+ TypeRecords.reserve(ExpectedSize);
+}
+
/// Gets the type index for the next type record.
TypeIndex TypeDatabase::getNextTypeIndex() const {
return TypeIndex(TypeIndex::FirstNonSimpleIndex + CVUDTNames.size());
diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp
index 573d37d77fee..246899ac12b9 100644
--- a/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -692,6 +692,10 @@ DWARFContext::getLineTableForUnit(DWARFUnit *U) {
if (const DWARFLineTable *lt = Line->getLineTable(stmtOffset))
return lt;
+ // Make sure the offset is good before we try to parse.
+ if (stmtOffset >= U->getLineSection().size())
+ return nullptr;
+
// We have to parse it first.
DataExtractor lineData(U->getLineSection(), isLittleEndian(),
U->getAddressByteSize());
@@ -953,6 +957,26 @@ static bool isRelocScattered(const object::ObjectFile &Obj,
return MachObj->isRelocationScattered(RelocInfo);
}
+Error DWARFContextInMemory::maybeDecompress(const SectionRef &Sec,
+ StringRef Name, StringRef &Data) {
+ if (!Decompressor::isCompressed(Sec))
+ return Error::success();
+
+ Expected<Decompressor> Decompressor =
+ Decompressor::create(Name, Data, IsLittleEndian, AddressSize == 8);
+ if (!Decompressor)
+ return Decompressor.takeError();
+
+ SmallString<32> Out;
+ if (auto Err = Decompressor->decompress(Out))
+ return Err;
+
+ UncompressedSections.emplace_back(std::move(Out));
+ Data = UncompressedSections.back();
+
+ return Error::success();
+}
+
DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
const LoadedObjectInfo *L)
: IsLittleEndian(Obj.isLittleEndian()),
@@ -976,16 +1000,11 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
if (!L || !L->getLoadedSectionContents(*RelocatedSection,data))
Section.getContents(data);
- if (Decompressor::isCompressed(Section)) {
- Expected<Decompressor> Decompressor =
- Decompressor::create(name, data, IsLittleEndian, AddressSize == 8);
- if (!Decompressor)
- continue;
- SmallString<32> Out;
- if (auto Err = Decompressor->decompress(Out))
- continue;
- UncompressedSections.emplace_back(std::move(Out));
- data = UncompressedSections.back();
+ if (auto Err = maybeDecompress(Section, name, data)) {
+ errs() << "error: failed to decompress '" + name + "', " +
+ toString(std::move(Err))
+ << '\n';
+ continue;
}
// Compressed sections names in GNU style starts from ".z",
diff --git a/lib/DebugInfo/DWARF/DWARFFormValue.cpp b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
index 7f827de89240..1cbd3ea2c869 100644
--- a/lib/DebugInfo/DWARF/DWARFFormValue.cpp
+++ b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
@@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "SyntaxHighlighting.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
-#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/Support/Dwarf.h"
@@ -29,34 +29,34 @@ using namespace dwarf;
using namespace syntax;
static const DWARFFormValue::FormClass DWARF4FormClasses[] = {
- DWARFFormValue::FC_Unknown, // 0x0
- DWARFFormValue::FC_Address, // 0x01 DW_FORM_addr
- DWARFFormValue::FC_Unknown, // 0x02 unused
- DWARFFormValue::FC_Block, // 0x03 DW_FORM_block2
- DWARFFormValue::FC_Block, // 0x04 DW_FORM_block4
- DWARFFormValue::FC_Constant, // 0x05 DW_FORM_data2
- // --- These can be FC_SectionOffset in DWARF3 and below:
- DWARFFormValue::FC_Constant, // 0x06 DW_FORM_data4
- DWARFFormValue::FC_Constant, // 0x07 DW_FORM_data8
- // ---
- DWARFFormValue::FC_String, // 0x08 DW_FORM_string
- DWARFFormValue::FC_Block, // 0x09 DW_FORM_block
- DWARFFormValue::FC_Block, // 0x0a DW_FORM_block1
- DWARFFormValue::FC_Constant, // 0x0b DW_FORM_data1
- DWARFFormValue::FC_Flag, // 0x0c DW_FORM_flag
- DWARFFormValue::FC_Constant, // 0x0d DW_FORM_sdata
- DWARFFormValue::FC_String, // 0x0e DW_FORM_strp
- DWARFFormValue::FC_Constant, // 0x0f DW_FORM_udata
- DWARFFormValue::FC_Reference, // 0x10 DW_FORM_ref_addr
- DWARFFormValue::FC_Reference, // 0x11 DW_FORM_ref1
- DWARFFormValue::FC_Reference, // 0x12 DW_FORM_ref2
- DWARFFormValue::FC_Reference, // 0x13 DW_FORM_ref4
- DWARFFormValue::FC_Reference, // 0x14 DW_FORM_ref8
- DWARFFormValue::FC_Reference, // 0x15 DW_FORM_ref_udata
- DWARFFormValue::FC_Indirect, // 0x16 DW_FORM_indirect
- DWARFFormValue::FC_SectionOffset, // 0x17 DW_FORM_sec_offset
- DWARFFormValue::FC_Exprloc, // 0x18 DW_FORM_exprloc
- DWARFFormValue::FC_Flag, // 0x19 DW_FORM_flag_present
+ DWARFFormValue::FC_Unknown, // 0x0
+ DWARFFormValue::FC_Address, // 0x01 DW_FORM_addr
+ DWARFFormValue::FC_Unknown, // 0x02 unused
+ DWARFFormValue::FC_Block, // 0x03 DW_FORM_block2
+ DWARFFormValue::FC_Block, // 0x04 DW_FORM_block4
+ DWARFFormValue::FC_Constant, // 0x05 DW_FORM_data2
+ // --- These can be FC_SectionOffset in DWARF3 and below:
+ DWARFFormValue::FC_Constant, // 0x06 DW_FORM_data4
+ DWARFFormValue::FC_Constant, // 0x07 DW_FORM_data8
+ // ---
+ DWARFFormValue::FC_String, // 0x08 DW_FORM_string
+ DWARFFormValue::FC_Block, // 0x09 DW_FORM_block
+ DWARFFormValue::FC_Block, // 0x0a DW_FORM_block1
+ DWARFFormValue::FC_Constant, // 0x0b DW_FORM_data1
+ DWARFFormValue::FC_Flag, // 0x0c DW_FORM_flag
+ DWARFFormValue::FC_Constant, // 0x0d DW_FORM_sdata
+ DWARFFormValue::FC_String, // 0x0e DW_FORM_strp
+ DWARFFormValue::FC_Constant, // 0x0f DW_FORM_udata
+ DWARFFormValue::FC_Reference, // 0x10 DW_FORM_ref_addr
+ DWARFFormValue::FC_Reference, // 0x11 DW_FORM_ref1
+ DWARFFormValue::FC_Reference, // 0x12 DW_FORM_ref2
+ DWARFFormValue::FC_Reference, // 0x13 DW_FORM_ref4
+ DWARFFormValue::FC_Reference, // 0x14 DW_FORM_ref8
+ DWARFFormValue::FC_Reference, // 0x15 DW_FORM_ref_udata
+ DWARFFormValue::FC_Indirect, // 0x16 DW_FORM_indirect
+ DWARFFormValue::FC_SectionOffset, // 0x17 DW_FORM_sec_offset
+ DWARFFormValue::FC_Exprloc, // 0x18 DW_FORM_exprloc
+ DWARFFormValue::FC_Flag, // 0x19 DW_FORM_flag_present
};
namespace {
@@ -83,10 +83,10 @@ public:
uint8_t getDwarfOffsetByteSize() const {
switch (Format) {
- case dwarf::DwarfFormat::DWARF32:
- return 4;
- case dwarf::DwarfFormat::DWARF64:
- return 8;
+ case dwarf::DwarfFormat::DWARF32:
+ return 4;
+ case dwarf::DwarfFormat::DWARF64:
+ return 8;
}
llvm_unreachable("Invalid Format value");
}
@@ -97,83 +97,83 @@ public:
template <class T>
static Optional<uint8_t> getFixedByteSize(dwarf::Form Form, const T *U) {
switch (Form) {
- case DW_FORM_addr:
- if (U)
- return U->getAddressByteSize();
- return None;
+ case DW_FORM_addr:
+ if (U)
+ return U->getAddressByteSize();
+ return None;
- case DW_FORM_block: // ULEB128 length L followed by L bytes.
- case DW_FORM_block1: // 1 byte length L followed by L bytes.
- case DW_FORM_block2: // 2 byte length L followed by L bytes.
- case DW_FORM_block4: // 4 byte length L followed by L bytes.
- case DW_FORM_string: // C-string with null terminator.
- case DW_FORM_sdata: // SLEB128.
- case DW_FORM_udata: // ULEB128.
- case DW_FORM_ref_udata: // ULEB128.
- case DW_FORM_indirect: // ULEB128.
- case DW_FORM_exprloc: // ULEB128 length L followed by L bytes.
- case DW_FORM_strx: // ULEB128.
- case DW_FORM_addrx: // ULEB128.
- case DW_FORM_loclistx: // ULEB128.
- case DW_FORM_rnglistx: // ULEB128.
- case DW_FORM_GNU_addr_index: // ULEB128.
- case DW_FORM_GNU_str_index: // ULEB128.
- return None;
+ case DW_FORM_block: // ULEB128 length L followed by L bytes.
+ case DW_FORM_block1: // 1 byte length L followed by L bytes.
+ case DW_FORM_block2: // 2 byte length L followed by L bytes.
+ case DW_FORM_block4: // 4 byte length L followed by L bytes.
+ case DW_FORM_string: // C-string with null terminator.
+ case DW_FORM_sdata: // SLEB128.
+ case DW_FORM_udata: // ULEB128.
+ case DW_FORM_ref_udata: // ULEB128.
+ case DW_FORM_indirect: // ULEB128.
+ case DW_FORM_exprloc: // ULEB128 length L followed by L bytes.
+ case DW_FORM_strx: // ULEB128.
+ case DW_FORM_addrx: // ULEB128.
+ case DW_FORM_loclistx: // ULEB128.
+ case DW_FORM_rnglistx: // ULEB128.
+ case DW_FORM_GNU_addr_index: // ULEB128.
+ case DW_FORM_GNU_str_index: // ULEB128.
+ return None;
- case DW_FORM_ref_addr:
- if (U)
- return U->getRefAddrByteSize();
- return None;
+ case DW_FORM_ref_addr:
+ if (U)
+ return U->getRefAddrByteSize();
+ return None;
- case DW_FORM_flag:
- case DW_FORM_data1:
- case DW_FORM_ref1:
- case DW_FORM_strx1:
- case DW_FORM_addrx1:
- return 1;
+ case DW_FORM_flag:
+ case DW_FORM_data1:
+ case DW_FORM_ref1:
+ case DW_FORM_strx1:
+ case DW_FORM_addrx1:
+ return 1;
- case DW_FORM_data2:
- case DW_FORM_ref2:
- case DW_FORM_strx2:
- case DW_FORM_addrx2:
- return 2;
+ case DW_FORM_data2:
+ case DW_FORM_ref2:
+ case DW_FORM_strx2:
+ case DW_FORM_addrx2:
+ return 2;
- case DW_FORM_data4:
- case DW_FORM_ref4:
- case DW_FORM_ref_sup4:
- case DW_FORM_strx4:
- case DW_FORM_addrx4:
- return 4;
+ case DW_FORM_data4:
+ case DW_FORM_ref4:
+ case DW_FORM_ref_sup4:
+ case DW_FORM_strx4:
+ case DW_FORM_addrx4:
+ return 4;
- case DW_FORM_strp:
- case DW_FORM_GNU_ref_alt:
- case DW_FORM_GNU_strp_alt:
- case DW_FORM_line_strp:
- case DW_FORM_sec_offset:
- case DW_FORM_strp_sup:
- if (U)
- return U->getDwarfOffsetByteSize();
- return None;
+ case DW_FORM_strp:
+ case DW_FORM_GNU_ref_alt:
+ case DW_FORM_GNU_strp_alt:
+ case DW_FORM_line_strp:
+ case DW_FORM_sec_offset:
+ case DW_FORM_strp_sup:
+ if (U)
+ return U->getDwarfOffsetByteSize();
+ return None;
- case DW_FORM_data8:
- case DW_FORM_ref8:
- case DW_FORM_ref_sig8:
- case DW_FORM_ref_sup8:
- return 8;
+ case DW_FORM_data8:
+ case DW_FORM_ref8:
+ case DW_FORM_ref_sig8:
+ case DW_FORM_ref_sup8:
+ return 8;
- case DW_FORM_flag_present:
- return 0;
+ case DW_FORM_flag_present:
+ return 0;
- case DW_FORM_data16:
- return 16;
+ case DW_FORM_data16:
+ return 16;
- case DW_FORM_implicit_const:
- // The implicit value is stored in the abbreviation as a SLEB128, and
- // there no data in debug info.
- return 0;
+ case DW_FORM_implicit_const:
+ // The implicit value is stored in the abbreviation as a SLEB128, and
+ // there no data in debug info.
+ return 0;
- default:
- llvm_unreachable("Handle this form in this switch statement");
+ default:
+ llvm_unreachable("Handle this form in this switch statement");
}
return None;
}
@@ -184,91 +184,91 @@ static bool skipFormValue(dwarf::Form Form, const DataExtractor &DebugInfoData,
bool Indirect = false;
do {
switch (Form) {
- // Blocks of inlined data that have a length field and the data bytes
- // inlined in the .debug_info.
- case DW_FORM_exprloc:
- case DW_FORM_block: {
- uint64_t size = DebugInfoData.getULEB128(OffsetPtr);
- *OffsetPtr += size;
- return true;
- }
- case DW_FORM_block1: {
- uint8_t size = DebugInfoData.getU8(OffsetPtr);
- *OffsetPtr += size;
- return true;
- }
- case DW_FORM_block2: {
- uint16_t size = DebugInfoData.getU16(OffsetPtr);
- *OffsetPtr += size;
- return true;
- }
- case DW_FORM_block4: {
- uint32_t size = DebugInfoData.getU32(OffsetPtr);
- *OffsetPtr += size;
+ // Blocks of inlined data that have a length field and the data bytes
+ // inlined in the .debug_info.
+ case DW_FORM_exprloc:
+ case DW_FORM_block: {
+ uint64_t size = DebugInfoData.getULEB128(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+ case DW_FORM_block1: {
+ uint8_t size = DebugInfoData.getU8(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+ case DW_FORM_block2: {
+ uint16_t size = DebugInfoData.getU16(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+ case DW_FORM_block4: {
+ uint32_t size = DebugInfoData.getU32(OffsetPtr);
+ *OffsetPtr += size;
+ return true;
+ }
+
+ // Inlined NULL terminated C-strings.
+ case DW_FORM_string:
+ DebugInfoData.getCStr(OffsetPtr);
+ return true;
+
+ case DW_FORM_addr:
+ case DW_FORM_ref_addr:
+ case DW_FORM_flag_present:
+ case DW_FORM_data1:
+ case DW_FORM_data2:
+ case DW_FORM_data4:
+ case DW_FORM_data8:
+ case DW_FORM_flag:
+ case DW_FORM_ref1:
+ case DW_FORM_ref2:
+ case DW_FORM_ref4:
+ case DW_FORM_ref8:
+ case DW_FORM_ref_sig8:
+ case DW_FORM_ref_sup4:
+ case DW_FORM_ref_sup8:
+ case DW_FORM_strx1:
+ case DW_FORM_strx2:
+ case DW_FORM_strx4:
+ case DW_FORM_addrx1:
+ case DW_FORM_addrx2:
+ case DW_FORM_addrx4:
+ case DW_FORM_sec_offset:
+ case DW_FORM_strp:
+ case DW_FORM_strp_sup:
+ case DW_FORM_line_strp:
+ case DW_FORM_GNU_ref_alt:
+ case DW_FORM_GNU_strp_alt:
+ if (Optional<uint8_t> FixedSize = ::getFixedByteSize(Form, U)) {
+ *OffsetPtr += *FixedSize;
return true;
}
+ return false;
- // Inlined NULL terminated C-strings.
- case DW_FORM_string:
- DebugInfoData.getCStr(OffsetPtr);
- return true;
+ // signed or unsigned LEB 128 values.
+ case DW_FORM_sdata:
+ DebugInfoData.getSLEB128(OffsetPtr);
+ return true;
- case DW_FORM_addr:
- case DW_FORM_ref_addr:
- case DW_FORM_flag_present:
- case DW_FORM_data1:
- case DW_FORM_data2:
- case DW_FORM_data4:
- case DW_FORM_data8:
- case DW_FORM_flag:
- case DW_FORM_ref1:
- case DW_FORM_ref2:
- case DW_FORM_ref4:
- case DW_FORM_ref8:
- case DW_FORM_ref_sig8:
- case DW_FORM_ref_sup4:
- case DW_FORM_ref_sup8:
- case DW_FORM_strx1:
- case DW_FORM_strx2:
- case DW_FORM_strx4:
- case DW_FORM_addrx1:
- case DW_FORM_addrx2:
- case DW_FORM_addrx4:
- case DW_FORM_sec_offset:
- case DW_FORM_strp:
- case DW_FORM_strp_sup:
- case DW_FORM_line_strp:
- case DW_FORM_GNU_ref_alt:
- case DW_FORM_GNU_strp_alt:
- if (Optional<uint8_t> FixedSize = ::getFixedByteSize(Form, U)) {
- *OffsetPtr += *FixedSize;
- return true;
- }
- return false;
+ case DW_FORM_udata:
+ case DW_FORM_ref_udata:
+ case DW_FORM_strx:
+ case DW_FORM_addrx:
+ case DW_FORM_loclistx:
+ case DW_FORM_rnglistx:
+ case DW_FORM_GNU_addr_index:
+ case DW_FORM_GNU_str_index:
+ DebugInfoData.getULEB128(OffsetPtr);
+ return true;
- // signed or unsigned LEB 128 values.
- case DW_FORM_sdata:
- DebugInfoData.getSLEB128(OffsetPtr);
- return true;
+ case DW_FORM_indirect:
+ Indirect = true;
+ Form = static_cast<dwarf::Form>(DebugInfoData.getULEB128(OffsetPtr));
+ break;
- case DW_FORM_udata:
- case DW_FORM_ref_udata:
- case DW_FORM_strx:
- case DW_FORM_addrx:
- case DW_FORM_loclistx:
- case DW_FORM_rnglistx:
- case DW_FORM_GNU_addr_index:
- case DW_FORM_GNU_str_index:
- DebugInfoData.getULEB128(OffsetPtr);
- return true;
-
- case DW_FORM_indirect:
- Indirect = true;
- Form = static_cast<dwarf::Form>(DebugInfoData.getULEB128(OffsetPtr));
- break;
-
- default:
- return false;
+ default:
+ return false;
}
} while (Indirect);
return true;
@@ -316,87 +316,84 @@ bool DWARFFormValue::isFormClass(DWARFFormValue::FormClass FC) const {
FC == FC_SectionOffset;
}
-bool DWARFFormValue::extractValue(const DataExtractor &data,
- uint32_t *offset_ptr,
- const DWARFUnit *cu) {
- U = cu;
- bool indirect = false;
- bool is_block = false;
+bool DWARFFormValue::extractValue(const DataExtractor &Data,
+ uint32_t *OffsetPtr, const DWARFUnit *CU) {
+ U = CU;
+ bool Indirect = false;
+ bool IsBlock = false;
Value.data = nullptr;
// Read the value for the form into value and follow and DW_FORM_indirect
// instances we run into
do {
- indirect = false;
+ Indirect = false;
switch (Form) {
case DW_FORM_addr:
case DW_FORM_ref_addr: {
if (!U)
return false;
- uint16_t AddrSize =
- (Form == DW_FORM_addr)
- ? U->getAddressByteSize()
- : U->getRefAddrByteSize();
+ uint16_t AddrSize = (Form == DW_FORM_addr) ? U->getAddressByteSize()
+ : U->getRefAddrByteSize();
Value.uval =
- getRelocatedValue(data, AddrSize, offset_ptr, U->getRelocMap());
+ getRelocatedValue(Data, AddrSize, OffsetPtr, U->getRelocMap());
break;
}
case DW_FORM_exprloc:
case DW_FORM_block:
- Value.uval = data.getULEB128(offset_ptr);
- is_block = true;
+ Value.uval = Data.getULEB128(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_block1:
- Value.uval = data.getU8(offset_ptr);
- is_block = true;
+ Value.uval = Data.getU8(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_block2:
- Value.uval = data.getU16(offset_ptr);
- is_block = true;
+ Value.uval = Data.getU16(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_block4:
- Value.uval = data.getU32(offset_ptr);
- is_block = true;
+ Value.uval = Data.getU32(OffsetPtr);
+ IsBlock = true;
break;
case DW_FORM_data1:
case DW_FORM_ref1:
case DW_FORM_flag:
case DW_FORM_strx1:
case DW_FORM_addrx1:
- Value.uval = data.getU8(offset_ptr);
+ Value.uval = Data.getU8(OffsetPtr);
break;
case DW_FORM_data2:
case DW_FORM_ref2:
case DW_FORM_strx2:
case DW_FORM_addrx2:
- Value.uval = data.getU16(offset_ptr);
+ Value.uval = Data.getU16(OffsetPtr);
break;
case DW_FORM_data4:
case DW_FORM_ref4:
case DW_FORM_ref_sup4:
case DW_FORM_strx4:
case DW_FORM_addrx4: {
- const RelocAddrMap* RelocMap = U ? U->getRelocMap() : nullptr;
- Value.uval = getRelocatedValue(data, 4, offset_ptr, RelocMap);
+ const RelocAddrMap *RelocMap = U ? U->getRelocMap() : nullptr;
+ Value.uval = getRelocatedValue(Data, 4, OffsetPtr, RelocMap);
break;
}
case DW_FORM_data8:
case DW_FORM_ref8:
case DW_FORM_ref_sup8:
- Value.uval = data.getU64(offset_ptr);
+ Value.uval = Data.getU64(OffsetPtr);
break;
case DW_FORM_sdata:
- Value.sval = data.getSLEB128(offset_ptr);
+ Value.sval = Data.getSLEB128(OffsetPtr);
break;
case DW_FORM_udata:
case DW_FORM_ref_udata:
- Value.uval = data.getULEB128(offset_ptr);
+ Value.uval = Data.getULEB128(OffsetPtr);
break;
case DW_FORM_string:
- Value.cstr = data.getCStr(offset_ptr);
+ Value.cstr = Data.getCStr(OffsetPtr);
break;
case DW_FORM_indirect:
- Form = static_cast<dwarf::Form>(data.getULEB128(offset_ptr));
- indirect = true;
+ Form = static_cast<dwarf::Form>(Data.getULEB128(OffsetPtr));
+ Indirect = true;
break;
case DW_FORM_strp:
case DW_FORM_sec_offset:
@@ -406,82 +403,93 @@ bool DWARFFormValue::extractValue(const DataExtractor &data,
case DW_FORM_strp_sup: {
if (!U)
return false;
- Value.uval = getRelocatedValue(data, U->getDwarfOffsetByteSize(),
- offset_ptr, U->getRelocMap());
+ Value.uval = getRelocatedValue(Data, U->getDwarfOffsetByteSize(),
+ OffsetPtr, U->getRelocMap());
break;
}
case DW_FORM_flag_present:
Value.uval = 1;
break;
case DW_FORM_ref_sig8:
- Value.uval = data.getU64(offset_ptr);
+ Value.uval = Data.getU64(OffsetPtr);
break;
case DW_FORM_GNU_addr_index:
case DW_FORM_GNU_str_index:
- Value.uval = data.getULEB128(offset_ptr);
+ Value.uval = Data.getULEB128(OffsetPtr);
break;
default:
// DWARFFormValue::skipValue() will have caught this and caused all
// DWARF DIEs to fail to be parsed, so this code is not be reachable.
llvm_unreachable("unsupported form");
}
- } while (indirect);
+ } while (Indirect);
- if (is_block) {
- StringRef str = data.getData().substr(*offset_ptr, Value.uval);
+ if (IsBlock) {
+ StringRef Str = Data.getData().substr(*OffsetPtr, Value.uval);
Value.data = nullptr;
- if (!str.empty()) {
- Value.data = reinterpret_cast<const uint8_t *>(str.data());
- *offset_ptr += Value.uval;
+ if (!Str.empty()) {
+ Value.data = reinterpret_cast<const uint8_t *>(Str.data());
+ *OffsetPtr += Value.uval;
}
}
return true;
}
-bool DWARFFormValue::skipValue(DataExtractor DebugInfoData,
- uint32_t *offset_ptr, const DWARFUnit *U) const {
- return DWARFFormValue::skipValue(Form, DebugInfoData, offset_ptr, U);
+bool DWARFFormValue::skipValue(DataExtractor DebugInfoData, uint32_t *OffsetPtr,
+ const DWARFUnit *U) const {
+ return DWARFFormValue::skipValue(Form, DebugInfoData, OffsetPtr, U);
}
-bool DWARFFormValue::skipValue(dwarf::Form form, DataExtractor DebugInfoData,
- uint32_t *offset_ptr, const DWARFUnit *U) {
- return skipFormValue(form, DebugInfoData, offset_ptr, U);
+bool DWARFFormValue::skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, const DWARFUnit *U) {
+ return skipFormValue(Form, DebugInfoData, OffsetPtr, U);
}
-bool DWARFFormValue::skipValue(dwarf::Form form, DataExtractor DebugInfoData,
- uint32_t *offset_ptr, uint16_t Version,
+bool DWARFFormValue::skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
+ uint32_t *OffsetPtr, uint16_t Version,
uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format) {
FormSizeHelper FSH(Version, AddrSize, Format);
- return skipFormValue(form, DebugInfoData, offset_ptr, &FSH);
+ return skipFormValue(Form, DebugInfoData, OffsetPtr, &FSH);
}
-void
-DWARFFormValue::dump(raw_ostream &OS) const {
- uint64_t uvalue = Value.uval;
- bool cu_relative_offset = false;
+void DWARFFormValue::dump(raw_ostream &OS) const {
+ uint64_t UValue = Value.uval;
+ bool CURelativeOffset = false;
switch (Form) {
- case DW_FORM_addr: OS << format("0x%016" PRIx64, uvalue); break;
+ case DW_FORM_addr:
+ OS << format("0x%016" PRIx64, UValue);
+ break;
case DW_FORM_GNU_addr_index: {
- OS << format(" indexed (%8.8x) address = ", (uint32_t)uvalue);
+ OS << format(" indexed (%8.8x) address = ", (uint32_t)UValue);
uint64_t Address;
if (U == nullptr)
OS << "<invalid dwarf unit>";
- else if (U->getAddrOffsetSectionItem(uvalue, Address))
+ else if (U->getAddrOffsetSectionItem(UValue, Address))
OS << format("0x%016" PRIx64, Address);
else
OS << "<no .debug_addr section>";
break;
}
- case DW_FORM_flag_present: OS << "true"; break;
+ case DW_FORM_flag_present:
+ OS << "true";
+ break;
case DW_FORM_flag:
- case DW_FORM_data1: OS << format("0x%02x", (uint8_t)uvalue); break;
- case DW_FORM_data2: OS << format("0x%04x", (uint16_t)uvalue); break;
- case DW_FORM_data4: OS << format("0x%08x", (uint32_t)uvalue); break;
+ case DW_FORM_data1:
+ OS << format("0x%02x", (uint8_t)UValue);
+ break;
+ case DW_FORM_data2:
+ OS << format("0x%04x", (uint16_t)UValue);
+ break;
+ case DW_FORM_data4:
+ OS << format("0x%08x", (uint32_t)UValue);
+ break;
case DW_FORM_ref_sig8:
- case DW_FORM_data8: OS << format("0x%016" PRIx64, uvalue); break;
+ case DW_FORM_data8:
+ OS << format("0x%016" PRIx64, UValue);
+ break;
case DW_FORM_string:
OS << '"';
OS.write_escaped(Value.cstr);
@@ -492,80 +500,92 @@ DWARFFormValue::dump(raw_ostream &OS) const {
case DW_FORM_block1:
case DW_FORM_block2:
case DW_FORM_block4:
- if (uvalue > 0) {
+ if (UValue > 0) {
switch (Form) {
case DW_FORM_exprloc:
- case DW_FORM_block: OS << format("<0x%" PRIx64 "> ", uvalue); break;
- case DW_FORM_block1: OS << format("<0x%2.2x> ", (uint8_t)uvalue); break;
- case DW_FORM_block2: OS << format("<0x%4.4x> ", (uint16_t)uvalue); break;
- case DW_FORM_block4: OS << format("<0x%8.8x> ", (uint32_t)uvalue); break;
- default: break;
+ case DW_FORM_block:
+ OS << format("<0x%" PRIx64 "> ", UValue);
+ break;
+ case DW_FORM_block1:
+ OS << format("<0x%2.2x> ", (uint8_t)UValue);
+ break;
+ case DW_FORM_block2:
+ OS << format("<0x%4.4x> ", (uint16_t)UValue);
+ break;
+ case DW_FORM_block4:
+ OS << format("<0x%8.8x> ", (uint32_t)UValue);
+ break;
+ default:
+ break;
}
- const uint8_t* data_ptr = Value.data;
- if (data_ptr) {
- // uvalue contains size of block
- const uint8_t* end_data_ptr = data_ptr + uvalue;
- while (data_ptr < end_data_ptr) {
- OS << format("%2.2x ", *data_ptr);
- ++data_ptr;
+ const uint8_t *DataPtr = Value.data;
+ if (DataPtr) {
+ // UValue contains size of block
+ const uint8_t *EndDataPtr = DataPtr + UValue;
+ while (DataPtr < EndDataPtr) {
+ OS << format("%2.2x ", *DataPtr);
+ ++DataPtr;
}
- }
- else
+ } else
OS << "NULL";
}
break;
- case DW_FORM_sdata: OS << Value.sval; break;
- case DW_FORM_udata: OS << Value.uval; break;
+ case DW_FORM_sdata:
+ OS << Value.sval;
+ break;
+ case DW_FORM_udata:
+ OS << Value.uval;
+ break;
case DW_FORM_strp:
- OS << format(" .debug_str[0x%8.8x] = ", (uint32_t)uvalue);
+ OS << format(" .debug_str[0x%8.8x] = ", (uint32_t)UValue);
dumpString(OS);
break;
case DW_FORM_GNU_str_index:
- OS << format(" indexed (%8.8x) string = ", (uint32_t)uvalue);
+ OS << format(" indexed (%8.8x) string = ", (uint32_t)UValue);
dumpString(OS);
break;
case DW_FORM_GNU_strp_alt:
- OS << format("alt indirect string, offset: 0x%" PRIx64 "", uvalue);
+ OS << format("alt indirect string, offset: 0x%" PRIx64 "", UValue);
dumpString(OS);
break;
case DW_FORM_ref_addr:
- OS << format("0x%016" PRIx64, uvalue);
+ OS << format("0x%016" PRIx64, UValue);
break;
case DW_FORM_ref1:
- cu_relative_offset = true;
- OS << format("cu + 0x%2.2x", (uint8_t)uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%2.2x", (uint8_t)UValue);
break;
case DW_FORM_ref2:
- cu_relative_offset = true;
- OS << format("cu + 0x%4.4x", (uint16_t)uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%4.4x", (uint16_t)UValue);
break;
case DW_FORM_ref4:
- cu_relative_offset = true;
- OS << format("cu + 0x%4.4x", (uint32_t)uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%4.4x", (uint32_t)UValue);
break;
case DW_FORM_ref8:
- cu_relative_offset = true;
- OS << format("cu + 0x%8.8" PRIx64, uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%8.8" PRIx64, UValue);
break;
case DW_FORM_ref_udata:
- cu_relative_offset = true;
- OS << format("cu + 0x%" PRIx64, uvalue);
+ CURelativeOffset = true;
+ OS << format("cu + 0x%" PRIx64, UValue);
break;
case DW_FORM_GNU_ref_alt:
- OS << format("<alt 0x%" PRIx64 ">", uvalue);
+ OS << format("<alt 0x%" PRIx64 ">", UValue);
break;
- // All DW_FORM_indirect attributes should be resolved prior to calling
- // this function
+ // All DW_FORM_indirect attributes should be resolved prior to calling
+ // this function
case DW_FORM_indirect:
OS << "DW_FORM_indirect";
break;
- // Should be formatted to 64-bit for DWARF64.
+ // Should be formatted to 64-bit for DWARF64.
case DW_FORM_sec_offset:
- OS << format("0x%08x", (uint32_t)uvalue);
+ OS << format("0x%08x", (uint32_t)UValue);
break;
default:
@@ -573,10 +593,10 @@ DWARFFormValue::dump(raw_ostream &OS) const {
break;
}
- if (cu_relative_offset) {
+ if (CURelativeOffset) {
OS << " => {";
WithColor(OS, syntax::Address).get()
- << format("0x%8.8" PRIx64, uvalue + (U ? U->getOffset() : 0));
+ << format("0x%8.8" PRIx64, UValue + (U ? U->getOffset() : 0));
OS << "}";
}
}
@@ -653,15 +673,16 @@ Optional<uint64_t> DWARFFormValue::getAsSectionOffset() const {
}
Optional<uint64_t> DWARFFormValue::getAsUnsignedConstant() const {
- if ((!isFormClass(FC_Constant) && !isFormClass(FC_Flag))
- || Form == DW_FORM_sdata)
+ if ((!isFormClass(FC_Constant) && !isFormClass(FC_Flag)) ||
+ Form == DW_FORM_sdata)
return None;
return Value.uval;
}
Optional<int64_t> DWARFFormValue::getAsSignedConstant() const {
if ((!isFormClass(FC_Constant) && !isFormClass(FC_Flag)) ||
- (Form == DW_FORM_udata && uint64_t(std::numeric_limits<int64_t>::max()) < Value.uval))
+ (Form == DW_FORM_udata &&
+ uint64_t(std::numeric_limits<int64_t>::max()) < Value.uval))
return None;
switch (Form) {
case DW_FORM_data4:
diff --git a/lib/DebugInfo/PDB/CMakeLists.txt b/lib/DebugInfo/PDB/CMakeLists.txt
index e1753018c7df..e9fd29ccc4ca 100644
--- a/lib/DebugInfo/PDB/CMakeLists.txt
+++ b/lib/DebugInfo/PDB/CMakeLists.txt
@@ -30,6 +30,7 @@ endif()
add_pdb_impl_folder(Native
Native/DbiModuleDescriptor.cpp
Native/DbiModuleDescriptorBuilder.cpp
+ Native/DbiModuleList.cpp
Native/DbiStream.cpp
Native/DbiStreamBuilder.cpp
Native/EnumTables.cpp
diff --git a/lib/DebugInfo/PDB/Native/DbiModuleList.cpp b/lib/DebugInfo/PDB/Native/DbiModuleList.cpp
new file mode 100644
index 000000000000..434f775097e0
--- /dev/null
+++ b/lib/DebugInfo/PDB/Native/DbiModuleList.cpp
@@ -0,0 +1,273 @@
+//===- DbiModuleList.cpp - PDB module information list ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
+
+#include "llvm/DebugInfo/PDB/Native/RawError.h"
+#include "llvm/Support/Error.h"
+
+using namespace llvm;
+using namespace llvm::pdb;
+
+DbiModuleSourceFilesIterator::DbiModuleSourceFilesIterator(
+ const DbiModuleList &Modules, uint32_t Modi, uint16_t Filei)
+ : Modules(&Modules), Modi(Modi), Filei(Filei) {
+ setValue();
+}
+
+bool DbiModuleSourceFilesIterator::
+operator==(const DbiModuleSourceFilesIterator &R) const {
+ // incompatible iterators are never equal
+ if (!isCompatible(R))
+ return false;
+
+ // If they're compatible, and they're both ends, then they're equal.
+ if (isEnd() && R.isEnd())
+ return true;
+
+ // If one is an end and the other is not, they're not equal.
+ if (isEnd() != R.isEnd())
+ return false;
+
+ // Now we know:
+ // - They're compatible
+ // - They're not *both* end iterators
+ // - Their endness is the same.
+ // Thus, they're compatible iterators pointing to a valid file on the same
+ // module. All we need to check are the file indices.
+ assert(Modules == R.Modules);
+ assert(Modi == R.Modi);
+ assert(!isEnd());
+ assert(!R.isEnd());
+
+ return (Filei == R.Filei);
+}
+
+bool DbiModuleSourceFilesIterator::
+operator<(const DbiModuleSourceFilesIterator &R) const {
+ assert(isCompatible(R));
+
+ // It's not sufficient to compare the file indices, because default
+ // constructed iterators could be equal to iterators with valid indices. To
+ // account for this, early-out if they're equal.
+ if (*this == R)
+ return false;
+
+ return Filei < R.Filei;
+}
+
+std::ptrdiff_t DbiModuleSourceFilesIterator::
+operator-(const DbiModuleSourceFilesIterator &R) const {
+ assert(isCompatible(R));
+ assert(!(*this < R));
+
+ // If they're both end iterators, the distance is 0.
+ if (isEnd() && R.isEnd())
+ return 0;
+
+ assert(!R.isEnd());
+
+ // At this point, R cannot be end, but *this can, which means that *this
+ // might be a universal end iterator with none of its fields set. So in that
+ // case have to rely on R as the authority to figure out how many files there
+ // are to compute the distance.
+ uint32_t Thisi = Filei;
+ if (isEnd()) {
+ uint32_t RealModi = R.Modi;
+ Thisi = R.Modules->getSourceFileCount(RealModi);
+ }
+
+ assert(Thisi >= R.Filei);
+ return Thisi - R.Filei;
+}
+
+DbiModuleSourceFilesIterator &DbiModuleSourceFilesIterator::
+operator+=(std::ptrdiff_t N) {
+ assert(!isEnd());
+
+ Filei += N;
+ assert(Filei <= Modules->getSourceFileCount(Modi));
+ setValue();
+ return *this;
+}
+
+DbiModuleSourceFilesIterator &DbiModuleSourceFilesIterator::
+operator-=(std::ptrdiff_t N) {
+ // Note that we can subtract from an end iterator, but not a universal end
+ // iterator.
+ assert(!isUniversalEnd());
+
+ assert(N <= Filei);
+
+ Filei -= N;
+ return *this;
+}
+
+void DbiModuleSourceFilesIterator::setValue() {
+ if (isEnd()) {
+ ThisValue = "";
+ return;
+ }
+
+ uint32_t Off = Modules->ModuleInitialFileIndex[Modi] + Filei;
+ auto ExpectedValue = Modules->getFileName(Off);
+ if (!ExpectedValue) {
+ consumeError(ExpectedValue.takeError());
+ Filei = Modules->getSourceFileCount(Modi);
+ } else
+ ThisValue = *ExpectedValue;
+}
+
+bool DbiModuleSourceFilesIterator::isEnd() const {
+ if (isUniversalEnd())
+ return true;
+
+ assert(Modules);
+ assert(Modi <= Modules->getModuleCount());
+ assert(Filei <= Modules->getSourceFileCount(Modi));
+
+ if (Modi == Modules->getModuleCount())
+ return true;
+ if (Filei == Modules->getSourceFileCount(Modi))
+ return true;
+ return false;
+}
+
+bool DbiModuleSourceFilesIterator::isUniversalEnd() const { return !Modules; }
+
+bool DbiModuleSourceFilesIterator::isCompatible(
+ const DbiModuleSourceFilesIterator &R) const {
+ // Universal iterators are compatible with any other iterator.
+ if (isUniversalEnd() || R.isUniversalEnd())
+ return true;
+
+ // At this point, neither iterator is a universal end iterator, although one
+ // or both might be non-universal end iterators. Regardless, the module index
+ // is valid, so they are compatible if and only if they refer to the same
+ // module.
+ return Modi == R.Modi;
+}
+
+Error DbiModuleList::initialize(BinaryStreamRef ModInfo,
+ BinaryStreamRef FileInfo) {
+ if (auto EC = initializeModInfo(ModInfo))
+ return EC;
+ if (auto EC = initializeFileInfo(FileInfo))
+ return EC;
+
+ return Error::success();
+}
+
+Error DbiModuleList::initializeModInfo(BinaryStreamRef ModInfo) {
+ ModInfoSubstream = ModInfo;
+
+ if (ModInfo.getLength() == 0)
+ return Error::success();
+
+ BinaryStreamReader Reader(ModInfo);
+
+ if (auto EC = Reader.readArray(Descriptors, ModInfo.getLength()))
+ return EC;
+
+ return Error::success();
+}
+
+Error DbiModuleList::initializeFileInfo(BinaryStreamRef FileInfo) {
+ FileInfoSubstream = FileInfo;
+
+ if (FileInfo.getLength() == 0)
+ return Error::success();
+
+ BinaryStreamReader FISR(FileInfo);
+ if (auto EC = FISR.readObject(FileInfoHeader))
+ return EC;
+
+ // First is an array of `NumModules` module indices. This does not seem to be
+ // used for anything meaningful, so we ignore it.
+ FixedStreamArray<support::ulittle16_t> ModuleIndices;
+ if (auto EC = FISR.readArray(ModuleIndices, FileInfoHeader->NumModules))
+ return EC;
+ if (auto EC = FISR.readArray(ModFileCountArray, FileInfoHeader->NumModules))
+ return EC;
+
+ // Compute the real number of source files. We can't trust the value in
+  // `FileInfoHeader->NumSourceFiles` because it is a uint16, and the sum of all
+  // source file counts might be larger than a uint16. So we compute the real
+ // count by summing up the individual counts.
+ uint32_t NumSourceFiles = 0;
+ for (auto Count : ModFileCountArray)
+ NumSourceFiles += Count;
+
+ // In the reference implementation, this array is where the pointer documented
+ // at the definition of ModuleInfoHeader::FileNameOffs points to. Note that
+ // although the field in ModuleInfoHeader is ignored this array is not, as it
+ // is the authority on where each filename begins in the names buffer.
+ if (auto EC = FISR.readArray(FileNameOffsets, NumSourceFiles))
+ return EC;
+
+ if (auto EC = FISR.readStreamRef(NamesBuffer))
+ return EC;
+
+ auto DescriptorIter = Descriptors.begin();
+ uint32_t NextFileIndex = 0;
+ ModuleInitialFileIndex.resize(FileInfoHeader->NumModules);
+ ModuleDescriptorOffsets.resize(FileInfoHeader->NumModules);
+ for (size_t I = 0; I < FileInfoHeader->NumModules; ++I) {
+ assert(DescriptorIter != Descriptors.end());
+ ModuleInitialFileIndex[I] = NextFileIndex;
+ ModuleDescriptorOffsets[I] = DescriptorIter.offset();
+
+ NextFileIndex += ModFileCountArray[I];
+ ++DescriptorIter;
+ }
+
+ assert(DescriptorIter == Descriptors.end());
+ assert(NextFileIndex == NumSourceFiles);
+
+ return Error::success();
+}
+
+uint32_t DbiModuleList::getModuleCount() const {
+ return FileInfoHeader->NumModules;
+}
+
+uint32_t DbiModuleList::getSourceFileCount() const {
+ return FileNameOffsets.size();
+}
+
+uint16_t DbiModuleList::getSourceFileCount(uint32_t Modi) const {
+ return ModFileCountArray[Modi];
+}
+
+DbiModuleDescriptor DbiModuleList::getModuleDescriptor(uint32_t Modi) const {
+ assert(Modi < getModuleCount());
+ uint32_t Offset = ModuleDescriptorOffsets[Modi];
+ auto Iter = Descriptors.at(Offset);
+ assert(Iter != Descriptors.end());
+ return *Iter;
+}
+
+iterator_range<DbiModuleSourceFilesIterator>
+DbiModuleList::source_files(uint32_t Modi) const {
+ return make_range<DbiModuleSourceFilesIterator>(
+ DbiModuleSourceFilesIterator(*this, Modi, 0),
+ DbiModuleSourceFilesIterator());
+}
+
+Expected<StringRef> DbiModuleList::getFileName(uint32_t Index) const {
+ BinaryStreamReader Names(NamesBuffer);
+ if (Index >= getSourceFileCount())
+ return make_error<RawError>(raw_error_code::index_out_of_bounds);
+
+ uint32_t FileOffset = FileNameOffsets[Index];
+ Names.setOffset(FileOffset);
+ StringRef Name;
+ if (auto EC = Names.readCString(Name))
+ return std::move(EC);
+ return Name;
+}
diff --git a/lib/DebugInfo/PDB/Native/DbiStream.cpp b/lib/DebugInfo/PDB/Native/DbiStream.cpp
index db703809f7c9..f7538c580ba4 100644
--- a/lib/DebugInfo/PDB/Native/DbiStream.cpp
+++ b/lib/DebugInfo/PDB/Native/DbiStream.cpp
@@ -107,11 +107,11 @@ Error DbiStream::reload() {
return make_error<RawError>(raw_error_code::corrupt_file,
"DBI type server substream not aligned.");
+ BinaryStreamRef ModInfoSubstream;
+ BinaryStreamRef FileInfoSubstream;
if (auto EC =
Reader.readStreamRef(ModInfoSubstream, Header->ModiSubstreamSize))
return EC;
- if (auto EC = initializeModInfoArray())
- return EC;
if (auto EC = Reader.readStreamRef(SecContrSubstream,
Header->SecContrSubstreamSize))
@@ -129,14 +129,15 @@ Error DbiStream::reload() {
DbgStreams, Header->OptionalDbgHdrSize / sizeof(ulittle16_t)))
return EC;
+ if (auto EC = Modules.initialize(ModInfoSubstream, FileInfoSubstream))
+ return EC;
+
if (auto EC = initializeSectionContributionData())
return EC;
if (auto EC = initializeSectionHeadersData())
return EC;
if (auto EC = initializeSectionMapData())
return EC;
- if (auto EC = initializeFileInfo())
- return EC;
if (auto EC = initializeFpoRecords())
return EC;
@@ -215,7 +216,8 @@ FixedStreamArray<object::FpoData> DbiStream::getFpoRecords() {
return FpoRecords;
}
-ArrayRef<ModuleInfoEx> DbiStream::modules() const { return ModuleInfos; }
+const DbiModuleList &DbiStream::modules() const { return Modules; }
+
FixedStreamArray<SecMapEntry> DbiStream::getSectionMap() const {
return SectionMap;
}
@@ -248,25 +250,6 @@ Error DbiStream::initializeSectionContributionData() {
"Unsupported DBI Section Contribution version");
}
-Error DbiStream::initializeModInfoArray() {
- if (ModInfoSubstream.getLength() == 0)
- return Error::success();
-
- // Since each DbiModuleDescriptor in the stream is a variable length, we have
- // to iterate
- // them to know how many there actually are.
- BinaryStreamReader Reader(ModInfoSubstream);
-
- VarStreamArray<DbiModuleDescriptor> ModInfoArray;
- if (auto EC = Reader.readArray(ModInfoArray, ModInfoSubstream.getLength()))
- return EC;
- for (auto &Info : ModInfoArray) {
- ModuleInfos.emplace_back(Info);
- }
-
- return Error::success();
-}
-
// Initializes this->SectionHeaders.
Error DbiStream::initializeSectionHeadersData() {
if (DbgStreams.size() == 0)
@@ -338,90 +321,9 @@ Error DbiStream::initializeSectionMapData() {
return Error::success();
}
-Error DbiStream::initializeFileInfo() {
- if (FileInfoSubstream.getLength() == 0)
- return Error::success();
-
- const FileInfoSubstreamHeader *FH;
- BinaryStreamReader FISR(FileInfoSubstream);
- if (auto EC = FISR.readObject(FH))
- return EC;
-
- // The number of modules in the stream should be the same as reported by
- // the FileInfoSubstreamHeader.
- if (FH->NumModules != ModuleInfos.size())
- return make_error<RawError>(raw_error_code::corrupt_file,
- "FileInfo substream count doesn't match DBI.");
-
- FixedStreamArray<ulittle16_t> ModIndexArray;
- FixedStreamArray<ulittle16_t> ModFileCountArray;
-
- // First is an array of `NumModules` module indices. This is not used for the
- // same reason that `NumSourceFiles` is not used. It's an array of uint16's,
- // but it's possible there are more than 64k source files, which would imply
- // more than 64k modules (e.g. object files) as well. So we ignore this
- // field.
- if (auto EC = FISR.readArray(ModIndexArray, ModuleInfos.size()))
- return EC;
- if (auto EC = FISR.readArray(ModFileCountArray, ModuleInfos.size()))
- return EC;
-
- // Compute the real number of source files.
- uint32_t NumSourceFiles = 0;
- for (auto Count : ModFileCountArray)
- NumSourceFiles += Count;
-
- // This is the array that in the reference implementation corresponds to
- // `DbiModuleDescriptor::FileLayout::FileNameOffs`, which is commented there
- // as being a
- // pointer. Due to the mentioned problems of pointers causing difficulty
- // when reading from the file on 64-bit systems, we continue to ignore that
- // field in `DbiModuleDescriptor`, and instead build a vector of StringRefs
- // and stores
- // them in `ModuleInfoEx`. The value written to and read from the file is
- // not used anyway, it is only there as a way to store the offsets for the
- // purposes of later accessing the names at runtime.
- if (auto EC = FISR.readArray(FileNameOffsets, NumSourceFiles))
- return EC;
-
- if (auto EC = FISR.readStreamRef(NamesBuffer))
- return EC;
-
- // We go through each ModuleInfo, determine the number N of source files for
- // that module, and then get the next N offsets from the Offsets array, using
- // them to get the corresponding N names from the Names buffer and associating
- // each one with the corresponding module.
- uint32_t NextFileIndex = 0;
- for (size_t I = 0; I < ModuleInfos.size(); ++I) {
- uint32_t NumFiles = ModFileCountArray[I];
- ModuleInfos[I].SourceFiles.resize(NumFiles);
- for (size_t J = 0; J < NumFiles; ++J, ++NextFileIndex) {
- auto ThisName = getFileNameForIndex(NextFileIndex);
- if (!ThisName)
- return ThisName.takeError();
- ModuleInfos[I].SourceFiles[J] = *ThisName;
- }
- }
-
- return Error::success();
-}
-
uint32_t DbiStream::getDebugStreamIndex(DbgHeaderType Type) const {
uint16_t T = static_cast<uint16_t>(Type);
if (T >= DbgStreams.size())
return kInvalidStreamIndex;
return DbgStreams[T];
}
-
-Expected<StringRef> DbiStream::getFileNameForIndex(uint32_t Index) const {
- BinaryStreamReader Names(NamesBuffer);
- if (Index >= FileNameOffsets.size())
- return make_error<RawError>(raw_error_code::index_out_of_bounds);
-
- uint32_t FileOffset = FileNameOffsets[Index];
- Names.setOffset(FileOffset);
- StringRef Name;
- if (auto EC = Names.readCString(Name))
- return std::move(EC);
- return Name;
-}
diff --git a/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp b/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
index 9c0cc0bf8233..77f832582f82 100644
--- a/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
+++ b/lib/DebugInfo/PDB/Native/NativeCompilandSymbol.cpp
@@ -13,7 +13,7 @@ namespace llvm {
namespace pdb {
NativeCompilandSymbol::NativeCompilandSymbol(NativeSession &Session,
- const ModuleInfoEx &MI)
+ DbiModuleDescriptor MI)
: NativeRawSymbol(Session), Module(MI) {}
PDB_SymType NativeCompilandSymbol::getSymTag() const {
@@ -21,7 +21,7 @@ PDB_SymType NativeCompilandSymbol::getSymTag() const {
}
bool NativeCompilandSymbol::isEditAndContinueEnabled() const {
- return Module.Info.hasECInfo();
+ return Module.hasECInfo();
}
uint32_t NativeCompilandSymbol::getLexicalParentId() const { return 0; }
@@ -32,11 +32,11 @@ uint32_t NativeCompilandSymbol::getLexicalParentId() const { return 0; }
// this potential confusion.
std::string NativeCompilandSymbol::getLibraryName() const {
- return Module.Info.getObjFileName();
+ return Module.getObjFileName();
}
std::string NativeCompilandSymbol::getName() const {
- return Module.Info.getModuleName();
+ return Module.getModuleName();
}
} // namespace pdb
diff --git a/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp b/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp
index 7532110d005c..97319fd77d11 100644
--- a/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp
+++ b/lib/DebugInfo/PDB/Native/NativeEnumModules.cpp
@@ -10,6 +10,7 @@
#include "llvm/DebugInfo/PDB/Native/NativeEnumModules.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
@@ -19,25 +20,25 @@ namespace llvm {
namespace pdb {
NativeEnumModules::NativeEnumModules(NativeSession &PDBSession,
- ArrayRef<ModuleInfoEx> Modules,
+ const DbiModuleList &Modules,
uint32_t Index)
: Session(PDBSession), Modules(Modules), Index(Index) {}
uint32_t NativeEnumModules::getChildCount() const {
- return static_cast<uint32_t>(Modules.size());
+ return static_cast<uint32_t>(Modules.getModuleCount());
}
std::unique_ptr<PDBSymbol>
NativeEnumModules::getChildAtIndex(uint32_t Index) const {
- if (Index >= Modules.size())
+ if (Index >= Modules.getModuleCount())
return nullptr;
- return std::unique_ptr<PDBSymbol>(new PDBSymbolCompiland(Session,
- std::unique_ptr<IPDBRawSymbol>(
- new NativeCompilandSymbol(Session, Modules[Index]))));
+ return std::unique_ptr<PDBSymbol>(new PDBSymbolCompiland(
+ Session, std::unique_ptr<IPDBRawSymbol>(new NativeCompilandSymbol(
+ Session, Modules.getModuleDescriptor(Index)))));
}
std::unique_ptr<PDBSymbol> NativeEnumModules::getNext() {
- if (Index >= Modules.size())
+ if (Index >= Modules.getModuleCount())
return nullptr;
return getChildAtIndex(Index++);
}
diff --git a/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp b/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
index ec2a4b87457c..bb52560be167 100644
--- a/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
+++ b/lib/DebugInfo/PDB/Native/NativeExeSymbol.cpp
@@ -26,7 +26,7 @@ NativeExeSymbol::findChildren(PDB_SymType Type) const {
case PDB_SymType::Compiland: {
auto Dbi = File.getPDBDbiStream();
if (Dbi) {
- const auto Modules = Dbi->modules();
+ const DbiModuleList &Modules = Dbi->modules();
return std::unique_ptr<IPDBEnumSymbols>(
new NativeEnumModules(Session, Modules));
}
diff --git a/lib/DebugInfo/PDB/Native/TpiStream.cpp b/lib/DebugInfo/PDB/Native/TpiStream.cpp
index 5fef3edf8c2d..c0999d93dbb9 100644
--- a/lib/DebugInfo/PDB/Native/TpiStream.cpp
+++ b/lib/DebugInfo/PDB/Native/TpiStream.cpp
@@ -39,20 +39,6 @@ TpiStream::TpiStream(const PDBFile &File,
TpiStream::~TpiStream() = default;
-// Verifies that a given type record matches with a given hash value.
-// Currently we only verify SRC_LINE records.
-Error TpiStream::verifyHashValues() {
- TpiHashVerifier Verifier(HashValues, Header->NumHashBuckets);
- TypeDeserializer Deserializer;
-
- TypeVisitorCallbackPipeline Pipeline;
- Pipeline.addCallbackToPipeline(Deserializer);
- Pipeline.addCallbackToPipeline(Verifier);
-
- CVTypeVisitor Visitor(Pipeline);
- return Visitor.visitTypeStream(TypeRecords);
-}
-
Error TpiStream::reload() {
BinaryStreamReader Reader(*Stream);
@@ -98,7 +84,7 @@ Error TpiStream::reload() {
// There should be a hash value for every type record, or no hashes at all.
uint32_t NumHashValues =
Header->HashValueBuffer.Length / sizeof(ulittle32_t);
- if (NumHashValues != NumTypeRecords() && NumHashValues != 0)
+ if (NumHashValues != getNumTypeRecords() && NumHashValues != 0)
return make_error<RawError>(
raw_error_code::corrupt_file,
"TPI hash count does not match with the number of type records.");
@@ -122,12 +108,6 @@ Error TpiStream::reload() {
}
HashStream = std::move(HS);
-
- // TPI hash table is a parallel array for the type records.
- // Verify that the hash values match with type records.
- if (NumHashValues > 0)
- if (auto EC = verifyHashValues())
- return EC;
}
return Error::success();
@@ -142,7 +122,7 @@ uint32_t TpiStream::TypeIndexBegin() const { return Header->TypeIndexBegin; }
uint32_t TpiStream::TypeIndexEnd() const { return Header->TypeIndexEnd; }
-uint32_t TpiStream::NumTypeRecords() const {
+uint32_t TpiStream::getNumTypeRecords() const {
return TypeIndexEnd() - TypeIndexBegin();
}
@@ -154,7 +134,7 @@ uint16_t TpiStream::getTypeHashStreamAuxIndex() const {
return Header->HashAuxStreamIndex;
}
-uint32_t TpiStream::NumHashBuckets() const { return Header->NumHashBuckets; }
+uint32_t TpiStream::getNumHashBuckets() const { return Header->NumHashBuckets; }
uint32_t TpiStream::getHashKeySize() const { return Header->HashKeySize; }
FixedStreamArray<support::ulittle32_t> TpiStream::getHashValues() const {
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
index 7bfa79445584..e45fdc7aee18 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -861,6 +861,15 @@ RuntimeDyldCheckerImpl::getSubsectionStartingAt(StringRef Name) const {
SymInfo.getOffset());
}
+Optional<uint64_t>
+RuntimeDyldCheckerImpl::getSectionLoadAddress(void *LocalAddress) const {
+ for (auto &S : getRTDyld().Sections) {
+ if (S.getAddress() == LocalAddress)
+ return S.getLoadAddress();
+ }
+ return Optional<uint64_t>();
+}
+
void RuntimeDyldCheckerImpl::registerSection(
StringRef FilePath, unsigned SectionID) {
StringRef FileName = sys::path::filename(FilePath);
@@ -935,3 +944,8 @@ RuntimeDyldChecker::getSectionAddr(StringRef FileName, StringRef SectionName,
bool LocalAddress) {
return Impl->getSectionAddr(FileName, SectionName, LocalAddress);
}
+
+Optional<uint64_t>
+RuntimeDyldChecker::getSectionLoadAddress(void *LocalAddress) const {
+ return Impl->getSectionLoadAddress(LocalAddress);
+}
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
index b7263be09934..b462ef2c00ce 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
@@ -60,6 +60,8 @@ private:
bool IsInsideLoad) const;
StringRef getSubsectionStartingAt(StringRef Name) const;
+ Optional<uint64_t> getSectionLoadAddress(void *LocalAddr) const;
+
void registerSection(StringRef FilePath, unsigned SectionID);
void registerStubMap(StringRef FilePath, unsigned SectionID,
const RuntimeDyldImpl::StubMap &RTDyldStubs);
diff --git a/lib/Fuzzer/FuzzerLoop.cpp b/lib/Fuzzer/FuzzerLoop.cpp
index 4e4def8cb87e..d84c3dbdaf77 100644
--- a/lib/Fuzzer/FuzzerLoop.cpp
+++ b/lib/Fuzzer/FuzzerLoop.cpp
@@ -199,7 +199,7 @@ void Fuzzer::CrashCallback() {
Printf("SUMMARY: libFuzzer: deadly signal\n");
DumpCurrentUnit("crash-");
PrintFinalStats();
- exit(Options.ErrorExitCode);
+ _Exit(Options.ErrorExitCode); // Stop right now.
}
void Fuzzer::InterruptCallback() {
diff --git a/lib/IR/ConstantRange.cpp b/lib/IR/ConstantRange.cpp
index 5425676e4edc..aeb1257754f3 100644
--- a/lib/IR/ConstantRange.cpp
+++ b/lib/IR/ConstantRange.cpp
@@ -251,7 +251,7 @@ APInt ConstantRange::getSetSize() const {
}
bool
-ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const {
+ConstantRange::isSizeStrictlySmallerThan(const ConstantRange &Other) const {
assert(getBitWidth() == Other.getBitWidth());
if (isFullSet())
return false;
@@ -260,6 +260,17 @@ ConstantRange::isSizeStrictlySmallerThanOf(const ConstantRange &Other) const {
return (Upper - Lower).ult(Other.Upper - Other.Lower);
}
+bool
+ConstantRange::isSizeLargerThan(uint64_t MaxSize) const {
+ assert(MaxSize && "MaxSize can't be 0.");
+ // If this a full set, we need special handling to avoid needing an extra bit
+ // to represent the size.
+ if (isFullSet())
+ return APInt::getMaxValue(getBitWidth()).ugt(MaxSize - 1);
+
+ return (Upper - Lower).ugt(MaxSize);
+}
+
APInt ConstantRange::getUnsignedMax() const {
if (isFullSet() || isWrappedSet())
return APInt::getMaxValue(getBitWidth());
@@ -374,7 +385,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Upper.ule(Lower))
return ConstantRange(CR.Lower, Upper);
- if (isSizeStrictlySmallerThanOf(CR))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -389,7 +400,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
if (CR.Upper.ult(Upper)) {
if (CR.Lower.ult(Upper)) {
- if (isSizeStrictlySmallerThanOf(CR))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -405,7 +416,7 @@ ConstantRange ConstantRange::intersectWith(const ConstantRange &CR) const {
return ConstantRange(CR.Lower, Upper);
}
- if (isSizeStrictlySmallerThanOf(CR))
+ if (isSizeStrictlySmallerThan(CR))
return *this;
return CR;
}
@@ -676,8 +687,8 @@ ConstantRange::add(const ConstantRange &Other) const {
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
- if (X.isSizeStrictlySmallerThanOf(*this) ||
- X.isSizeStrictlySmallerThanOf(Other))
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
// We've wrapped, therefore, full set.
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
return X;
@@ -709,8 +720,8 @@ ConstantRange::sub(const ConstantRange &Other) const {
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
- if (X.isSizeStrictlySmallerThanOf(*this) ||
- X.isSizeStrictlySmallerThanOf(Other))
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
// We've wrapped, therefore, full set.
return ConstantRange(getBitWidth(), /*isFullSet=*/true);
return X;
@@ -766,7 +777,7 @@ ConstantRange::multiply(const ConstantRange &Other) const {
ConstantRange Result_sext(std::min(L, Compare), std::max(L, Compare) + 1);
ConstantRange SR = Result_sext.truncate(getBitWidth());
- return UR.isSizeStrictlySmallerThanOf(SR) ? UR : SR;
+ return UR.isSizeStrictlySmallerThan(SR) ? UR : SR;
}
ConstantRange
diff --git a/lib/IR/DataLayout.cpp b/lib/IR/DataLayout.cpp
index 93bacdd2e80f..c117d29b7f69 100644
--- a/lib/IR/DataLayout.cpp
+++ b/lib/IR/DataLayout.cpp
@@ -1,4 +1,4 @@
-//===-- DataLayout.cpp - Data size & alignment routines --------------------==//
+//===- DataLayout.cpp - Data size & alignment routines ---------------------==//
//
// The LLVM Compiler Infrastructure
//
@@ -16,21 +16,27 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/IR/DataLayout.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Mutex.h"
-#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
+#include <cstdint>
#include <cstdlib>
+#include <tuple>
+#include <utility>
+
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -73,7 +79,6 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
}
}
-
/// getElementContainingOffset - Given a valid offset into the structure,
/// return the structure index that contains it.
unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
@@ -338,7 +343,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
break;
}
case 'n': // Native integer types.
- for (;;) {
+ while (true) {
unsigned Width = getInt(Tok);
if (Width == 0)
report_fatal_error(
@@ -393,7 +398,7 @@ void DataLayout::parseSpecifier(StringRef Desc) {
}
}
-DataLayout::DataLayout(const Module *M) : LayoutMap(nullptr) {
+DataLayout::DataLayout(const Module *M) {
init(M);
}
@@ -522,7 +527,7 @@ unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
namespace {
class StructLayoutMap {
- typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy;
+ using LayoutInfoTy = DenseMap<StructType*, StructLayout*>;
LayoutInfoTy LayoutInfo;
public:
@@ -577,7 +582,6 @@ const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
return L;
}
-
unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
@@ -778,4 +782,3 @@ unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
return Log2_32(getPreferredAlignment(GV));
}
-
diff --git a/lib/IR/DebugInfo.cpp b/lib/IR/DebugInfo.cpp
index c5d39c544304..ca3828420a72 100644
--- a/lib/IR/DebugInfo.cpp
+++ b/lib/IR/DebugInfo.cpp
@@ -1,4 +1,4 @@
-//===--- DebugInfo.cpp - Debug Information Helper Classes -----------------===//
+//===- DebugInfo.cpp - Debug Information Helper Classes -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,22 +12,29 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/IR/DebugInfo.h"
-#include "LLVMContextImpl.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/DIBuilder.h"
-#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/GVMaterializer.h"
-#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/ValueHandle.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <utility>
+
using namespace llvm;
using namespace llvm::dwarf;
@@ -249,7 +256,7 @@ bool DebugInfoFinder::addScope(DIScope *Scope) {
return true;
}
-static llvm::MDNode *stripDebugLocFromLoopID(llvm::MDNode *N) {
+static MDNode *stripDebugLocFromLoopID(MDNode *N) {
assert(N->op_begin() != N->op_end() && "Missing self reference?");
// if there is no debug location, we do not have to rewrite this MDNode.
@@ -288,7 +295,7 @@ bool llvm::stripDebugInfo(Function &F) {
F.setSubprogram(nullptr);
}
- llvm::DenseMap<llvm::MDNode*, llvm::MDNode*> LoopIDsMap;
+ DenseMap<MDNode*, MDNode*> LoopIDsMap;
for (BasicBlock &BB : F) {
for (auto II = BB.begin(), End = BB.end(); II != End;) {
Instruction &I = *II++; // We may delete the instruction, increment now.
@@ -525,7 +532,7 @@ private:
void traverse(MDNode *);
};
-} // Anonymous namespace.
+} // end anonymous namespace
void DebugTypeInfoRemoval::traverse(MDNode *N) {
if (!N || Replacements.count(N))
@@ -590,7 +597,7 @@ bool llvm::stripNonLineTableDebugInfo(Module &M) {
GV.eraseMetadata(LLVMContext::MD_dbg);
DebugTypeInfoRemoval Mapper(M.getContext());
- auto remap = [&](llvm::MDNode *Node) -> llvm::MDNode * {
+ auto remap = [&](MDNode *Node) -> MDNode * {
if (!Node)
return nullptr;
Mapper.traverseAndRemap(Node);
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index c26699eab4e2..906a28a5c887 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -625,20 +625,41 @@ void Instruction::updateProfWeight(uint64_t S, uint64_t T) {
return;
auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
- if (!ProfDataName || !ProfDataName->getString().equals("branch_weights"))
+ if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
+ !ProfDataName->getString().equals("VP")))
return;
- SmallVector<uint32_t, 4> Weights;
- for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
- // Using APInt::div may be expensive, but most cases should fit in 64 bits.
- APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i))
- ->getValue()
- .getZExtValue());
- Val *= APInt(128, S);
- Weights.push_back(Val.udiv(APInt(128, T)).getLimitedValue());
- }
MDBuilder MDB(getContext());
- setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
+ SmallVector<Metadata *, 3> Vals;
+ Vals.push_back(ProfileData->getOperand(0));
+ APInt APS(128, S), APT(128, T);
+ if (ProfDataName->getString().equals("branch_weights"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128,
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt64Ty(getContext()),
+ Val.udiv(APT).getLimitedValue())));
+ }
+ else if (ProfDataName->getString().equals("VP"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
+ // The first value is the key of the value profile, which will not change.
+ Vals.push_back(ProfileData->getOperand(i));
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128,
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt64Ty(getContext()),
+ Val.udiv(APT).getLimitedValue())));
+ }
+ setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
void Instruction::setProfWeight(uint64_t W) {
diff --git a/lib/IR/ModuleSummaryIndex.cpp b/lib/IR/ModuleSummaryIndex.cpp
index 01e1b8168afa..9dd712f9ca13 100644
--- a/lib/IR/ModuleSummaryIndex.cpp
+++ b/lib/IR/ModuleSummaryIndex.cpp
@@ -22,7 +22,7 @@ void ModuleSummaryIndex::collectDefinedFunctionsForModule(
StringRef ModulePath, GVSummaryMapTy &GVSummaryMap) const {
for (auto &GlobalList : *this) {
auto GUID = GlobalList.first;
- for (auto &GlobSummary : GlobalList.second) {
+ for (auto &GlobSummary : GlobalList.second.SummaryList) {
auto *Summary = dyn_cast_or_null<FunctionSummary>(GlobSummary.get());
if (!Summary)
// Ignore global variable, focus on functions
@@ -40,7 +40,7 @@ void ModuleSummaryIndex::collectDefinedGVSummariesPerModule(
StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries) const {
for (auto &GlobalList : *this) {
auto GUID = GlobalList.first;
- for (auto &Summary : GlobalList.second) {
+ for (auto &Summary : GlobalList.second.SummaryList) {
ModuleToDefinedGVSummaries[Summary->modulePath()][GUID] = Summary.get();
}
}
@@ -49,10 +49,10 @@ void ModuleSummaryIndex::collectDefinedGVSummariesPerModule(
GlobalValueSummary *
ModuleSummaryIndex::getGlobalValueSummary(uint64_t ValueGUID,
bool PerModuleIndex) const {
- auto SummaryList = findGlobalValueSummaryList(ValueGUID);
- assert(SummaryList != end() && "GlobalValue not found in index");
- assert((!PerModuleIndex || SummaryList->second.size() == 1) &&
+ auto VI = getValueInfo(ValueGUID);
+ assert(VI && "GlobalValue not found in index");
+ assert((!PerModuleIndex || VI.getSummaryList().size() == 1) &&
"Expected a single entry per global value in per-module index");
- auto &Summary = SummaryList->second[0];
+ auto &Summary = VI.getSummaryList()[0];
return Summary.get();
}
diff --git a/lib/LTO/LTO.cpp b/lib/LTO/LTO.cpp
index 0afa1ba6ecd6..2d2dcdec05fb 100644
--- a/lib/LTO/LTO.cpp
+++ b/lib/LTO/LTO.cpp
@@ -274,13 +274,14 @@ void llvm::thinLTOResolveWeakForLinkerInIndex(
// when needed.
DenseSet<GlobalValueSummary *> GlobalInvolvedWithAlias;
for (auto &I : Index)
- for (auto &S : I.second)
+ for (auto &S : I.second.SummaryList)
if (auto AS = dyn_cast<AliasSummary>(S.get()))
GlobalInvolvedWithAlias.insert(&AS->getAliasee());
for (auto &I : Index)
- thinLTOResolveWeakForLinkerGUID(I.second, I.first, GlobalInvolvedWithAlias,
- isPrevailing, recordNewLinkage);
+ thinLTOResolveWeakForLinkerGUID(I.second.SummaryList, I.first,
+ GlobalInvolvedWithAlias, isPrevailing,
+ recordNewLinkage);
}
static void thinLTOInternalizeAndPromoteGUID(
@@ -301,7 +302,7 @@ void llvm::thinLTOInternalizeAndPromoteInIndex(
ModuleSummaryIndex &Index,
function_ref<bool(StringRef, GlobalValue::GUID)> isExported) {
for (auto &I : Index)
- thinLTOInternalizeAndPromoteGUID(I.second, I.first, isExported);
+ thinLTOInternalizeAndPromoteGUID(I.second.SummaryList, I.first, isExported);
}
// Requires a destructor for std::vector<InputModule>.
diff --git a/lib/LTO/ThinLTOCodeGenerator.cpp b/lib/LTO/ThinLTOCodeGenerator.cpp
index 440275c34258..b4ee7c2b2fbc 100644
--- a/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -119,8 +119,9 @@ static void computePrevailingCopies(
};
for (auto &I : Index) {
- if (HasMultipleCopies(I.second))
- PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second);
+ if (HasMultipleCopies(I.second.SummaryList))
+ PrevailingCopy[I.first] =
+ getFirstDefinitionForLinker(I.second.SummaryList);
}
}
diff --git a/lib/MC/ConstantPools.cpp b/lib/MC/ConstantPools.cpp
index 8c94e2780998..ca5440237e49 100644
--- a/lib/MC/ConstantPools.cpp
+++ b/lib/MC/ConstantPools.cpp
@@ -57,6 +57,10 @@ const MCExpr *ConstantPool::addEntry(const MCExpr *Value, MCContext &Context,
bool ConstantPool::empty() { return Entries.empty(); }
+void ConstantPool::clearCache() {
+ CachedEntries.clear();
+}
+
//
// AssemblerConstantPools implementation
//
@@ -98,6 +102,13 @@ void AssemblerConstantPools::emitForCurrentSection(MCStreamer &Streamer) {
}
}
+void AssemblerConstantPools::clearCacheForCurrentSection(MCStreamer &Streamer) {
+ MCSection *Section = Streamer.getCurrentSectionOnly();
+ if (ConstantPool *CP = getConstantPool(Section)) {
+ CP->clearCache();
+ }
+}
+
const MCExpr *AssemblerConstantPools::addEntry(MCStreamer &Streamer,
const MCExpr *Expr,
unsigned Size, SMLoc Loc) {
diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp
index f36a21bf1121..66ba853da2fe 100644
--- a/lib/MC/MCParser/AsmParser.cpp
+++ b/lib/MC/MCParser/AsmParser.cpp
@@ -287,6 +287,7 @@ public:
/// }
private:
+ bool isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc);
bool parseStatement(ParseStatementInfo &Info,
MCAsmParserSemaCallback *SI);
bool parseCurlyBlockScope(SmallVectorImpl<AsmRewrite>& AsmStrRewrites);
@@ -1192,6 +1193,31 @@ AsmParser::applyModifierToExpr(const MCExpr *E,
llvm_unreachable("Invalid expression kind!");
}
+/// This function checks if the next token is <string> type or arithmetic.
+/// string that begin with character '<' must end with character '>'.
+/// otherwise it is arithmetics.
+/// If the function returns a 'true' value,
+/// the End argument will be filled with the last location pointed to the '>'
+/// character.
+
+/// There is a gap between the AltMacro's documentation and the single quote implementation.
+/// GCC does not fully support this feature and so we will not support it.
+/// TODO: Adding single quote as a string.
+bool AsmParser::isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc) {
+ assert((StrLoc.getPointer() != NULL) &&
+ "Argument to the function cannot be a NULL value");
+ const char *CharPtr = StrLoc.getPointer();
+ while ((*CharPtr != '>') && (*CharPtr != '\n') &&
+ (*CharPtr != '\r') && (*CharPtr != '\0')){
+ CharPtr++;
+ }
+ if (*CharPtr == '>') {
+ EndLoc = StrLoc.getFromPointer(CharPtr + 1);
+ return true;
+ }
+ return false;
+}
+
/// \brief Parse an expression and return it.
///
/// expr ::= expr &&,|| expr -> lowest.
@@ -2461,9 +2487,9 @@ bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
if (NamedParametersFound && FA.Name.empty())
return Error(IDLoc, "cannot mix positional and keyword arguments");
+ SMLoc StrLoc = Lexer.getLoc();
+ SMLoc EndLoc;
if (Lexer.IsaAltMacroMode() && Lexer.is(AsmToken::Percent)) {
- SMLoc StrLoc = Lexer.getLoc();
- SMLoc EndLoc;
const MCExpr *AbsoluteExp;
int64_t Value;
/// Eat '%'
@@ -2476,8 +2502,16 @@ bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
const char *EndChar = EndLoc.getPointer();
AsmToken newToken(AsmToken::Integer, StringRef(StrChar , EndChar - StrChar), Value);
FA.Value.push_back(newToken);
- }
- else if(parseMacroArgument(FA.Value, Vararg))
+ } else if (Lexer.IsaAltMacroMode() && Lexer.is(AsmToken::Less) &&
+ isAltmacroString(StrLoc, EndLoc)) {
+ const char *StrChar = StrLoc.getPointer();
+ const char *EndChar = EndLoc.getPointer();
+ jumpToLoc(EndLoc, CurBuffer);
+ /// Eat from '<' to '>'
+ Lex();
+ AsmToken newToken(AsmToken::String, StringRef(StrChar, EndChar - StrChar));
+ FA.Value.push_back(newToken);
+ } else if(parseMacroArgument(FA.Value, Vararg))
return true;
unsigned PI = Parameter;
diff --git a/lib/Object/COFFObjectFile.cpp b/lib/Object/COFFObjectFile.cpp
index 1866aba9b21a..b1223e81be43 100644
--- a/lib/Object/COFFObjectFile.cpp
+++ b/lib/Object/COFFObjectFile.cpp
@@ -19,6 +19,7 @@
#include "llvm/Object/COFF.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
@@ -159,8 +160,7 @@ void COFFObjectFile::moveSymbolNext(DataRefImpl &Ref) const {
Expected<StringRef> COFFObjectFile::getSymbolName(DataRefImpl Ref) const {
COFFSymbolRef Symb = getCOFFSymbol(Ref);
StringRef Result;
- std::error_code EC = getSymbolName(Symb, Result);
- if (EC)
+ if (std::error_code EC = getSymbolName(Symb, Result))
return errorCodeToError(EC);
return Result;
}
@@ -1591,3 +1591,44 @@ std::error_code BaseRelocRef::getRVA(uint32_t &Result) const {
Result = Header->PageRVA + Entry[Index].getOffset();
return std::error_code();
}
+
+#define RETURN_IF_ERROR(X) \
+ if (auto EC = errorToErrorCode(X)) \
+ return EC;
+
+ErrorOr<ArrayRef<UTF16>> ResourceSectionRef::getDirStringAtOffset(uint32_t Offset) {
+ BinaryStreamReader Reader = BinaryStreamReader(BBS);
+ Reader.setOffset(Offset);
+ uint16_t Length;
+ RETURN_IF_ERROR(Reader.readInteger(Length));
+ ArrayRef<UTF16> RawDirString;
+ // Strings are stored as 2-byte aligned unicode characters but readFixedString
+ // assumes byte string, so we double length.
+ RETURN_IF_ERROR(Reader.readArray(RawDirString, Length));
+ return RawDirString;
+}
+
+ErrorOr<ArrayRef<UTF16>>
+ResourceSectionRef::getEntryNameString(const coff_resource_dir_entry &Entry) {
+ return getDirStringAtOffset(Entry.Identifier.getNameOffset());
+}
+
+ErrorOr<const coff_resource_dir_table &>
+ResourceSectionRef::getTableAtOffset(uint32_t Offset) {
+ const coff_resource_dir_table *Table = nullptr;
+
+ BinaryStreamReader Reader(BBS);
+ Reader.setOffset(Offset);
+ RETURN_IF_ERROR(Reader.readObject(Table));
+ assert(Table != nullptr);
+ return *Table;
+}
+
+ErrorOr<const coff_resource_dir_table &>
+ResourceSectionRef::getEntrySubDir(const coff_resource_dir_entry &Entry) {
+ return getTableAtOffset(Entry.Offset.value());
+}
+
+ErrorOr<const coff_resource_dir_table &> ResourceSectionRef::getBaseTable() {
+ return getTableAtOffset(0);
+}
diff --git a/lib/Object/WasmObjectFile.cpp b/lib/Object/WasmObjectFile.cpp
index 9f3486e58a11..39f8704aacf2 100644
--- a/lib/Object/WasmObjectFile.cpp
+++ b/lib/Object/WasmObjectFile.cpp
@@ -253,11 +253,12 @@ Error WasmObjectFile::parseNameSection(const uint8_t *Ptr, const uint8_t *End) {
case wasm::WASM_NAMES_FUNCTION: {
uint32_t Count = readVaruint32(Ptr);
while (Count--) {
- /*uint32_t Index =*/readVaruint32(Ptr);
+ uint32_t Index = readVaruint32(Ptr);
StringRef Name = readString(Ptr);
if (!Name.empty())
Symbols.emplace_back(Name,
- WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME);
+ WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME,
+ Sections.size(), Index);
}
break;
}
@@ -384,7 +385,7 @@ Error WasmObjectFile::parseTypeSection(const uint8_t *Ptr, const uint8_t *End) {
Error WasmObjectFile::parseImportSection(const uint8_t *Ptr, const uint8_t *End) {
uint32_t Count = readVaruint32(Ptr);
Imports.reserve(Count);
- while (Count--) {
+ for (uint32_t i = 0; i < Count; i++) {
wasm::WasmImport Im;
Im.Module = readString(Ptr);
Im.Field = readString(Ptr);
@@ -392,12 +393,14 @@ Error WasmObjectFile::parseImportSection(const uint8_t *Ptr, const uint8_t *End)
switch (Im.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
Im.SigIndex = readVaruint32(Ptr);
- Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::FUNCTION_IMPORT);
+ Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::FUNCTION_IMPORT,
+ Sections.size(), i);
break;
case wasm::WASM_EXTERNAL_GLOBAL:
Im.GlobalType = readVarint7(Ptr);
Im.GlobalMutable = readVaruint1(Ptr);
- Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::GLOBAL_IMPORT);
+ Symbols.emplace_back(Im.Field, WasmSymbol::SymbolType::GLOBAL_IMPORT,
+ Sections.size(), i);
break;
default:
// TODO(sbc): Handle other kinds of imports
@@ -475,7 +478,7 @@ Error WasmObjectFile::parseGlobalSection(const uint8_t *Ptr, const uint8_t *End)
Error WasmObjectFile::parseExportSection(const uint8_t *Ptr, const uint8_t *End) {
uint32_t Count = readVaruint32(Ptr);
Exports.reserve(Count);
- while (Count--) {
+ for (uint32_t i = 0; i < Count; i++) {
wasm::WasmExport Ex;
Ex.Name = readString(Ptr);
Ex.Kind = readUint8(Ptr);
@@ -483,10 +486,12 @@ Error WasmObjectFile::parseExportSection(const uint8_t *Ptr, const uint8_t *End)
Exports.push_back(Ex);
switch (Ex.Kind) {
case wasm::WASM_EXTERNAL_FUNCTION:
- Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::FUNCTION_EXPORT);
+ Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::FUNCTION_EXPORT,
+ Sections.size(), i);
break;
case wasm::WASM_EXTERNAL_GLOBAL:
- Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::GLOBAL_EXPORT);
+ Symbols.emplace_back(Ex.Name, WasmSymbol::SymbolType::GLOBAL_EXPORT,
+ Sections.size(), i);
break;
default:
// TODO(sbc): Handle other kinds of exports
@@ -597,20 +602,28 @@ const wasm::WasmObjectHeader &WasmObjectFile::getHeader() const {
void WasmObjectFile::moveSymbolNext(DataRefImpl &Symb) const { Symb.d.a++; }
uint32_t WasmObjectFile::getSymbolFlags(DataRefImpl Symb) const {
+ uint32_t Result = SymbolRef::SF_None;
const WasmSymbol &Sym = getWasmSymbol(Symb);
+
switch (Sym.Type) {
case WasmSymbol::SymbolType::FUNCTION_IMPORT:
- return object::SymbolRef::SF_Undefined | SymbolRef::SF_Executable;
+ Result |= SymbolRef::SF_Undefined | SymbolRef::SF_Executable;
+ break;
case WasmSymbol::SymbolType::FUNCTION_EXPORT:
- return object::SymbolRef::SF_Global | SymbolRef::SF_Executable;
+ Result |= SymbolRef::SF_Global | SymbolRef::SF_Executable;
+ break;
case WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME:
- return object::SymbolRef::SF_Executable;
+ Result |= SymbolRef::SF_Executable;
+ break;
case WasmSymbol::SymbolType::GLOBAL_IMPORT:
- return object::SymbolRef::SF_Undefined;
+ Result |= SymbolRef::SF_Undefined;
+ break;
case WasmSymbol::SymbolType::GLOBAL_EXPORT:
- return object::SymbolRef::SF_Global;
+ Result |= SymbolRef::SF_Global;
+ break;
}
- llvm_unreachable("Unknown WasmSymbol::SymbolType");
+
+ return Result;
}
basic_symbol_iterator WasmObjectFile::symbol_begin() const {
@@ -635,12 +648,12 @@ Expected<StringRef> WasmObjectFile::getSymbolName(DataRefImpl Symb) const {
}
Expected<uint64_t> WasmObjectFile::getSymbolAddress(DataRefImpl Symb) const {
- return (uint64_t)Symb.d.a;
+ return getSymbolValue(Symb);
}
uint64_t WasmObjectFile::getSymbolValueImpl(DataRefImpl Symb) const {
- llvm_unreachable("not yet implemented");
- return 0;
+ const WasmSymbol &Sym = getWasmSymbol(Symb);
+ return Sym.ElementIndex;
}
uint32_t WasmObjectFile::getSymbolAlignment(DataRefImpl Symb) const {
@@ -655,14 +668,27 @@ uint64_t WasmObjectFile::getCommonSymbolSizeImpl(DataRefImpl Symb) const {
Expected<SymbolRef::Type>
WasmObjectFile::getSymbolType(DataRefImpl Symb) const {
- llvm_unreachable("not yet implemented");
- return errorCodeToError(object_error::invalid_symbol_index);
+ const WasmSymbol &Sym = getWasmSymbol(Symb);
+
+ switch (Sym.Type) {
+ case WasmSymbol::SymbolType::FUNCTION_IMPORT:
+ case WasmSymbol::SymbolType::FUNCTION_EXPORT:
+ case WasmSymbol::SymbolType::DEBUG_FUNCTION_NAME:
+ return SymbolRef::ST_Function;
+ case WasmSymbol::SymbolType::GLOBAL_IMPORT:
+ case WasmSymbol::SymbolType::GLOBAL_EXPORT:
+ return SymbolRef::ST_Data;
+ }
+
+ llvm_unreachable("Unknown WasmSymbol::SymbolType");
+ return SymbolRef::ST_Other;
}
Expected<section_iterator>
WasmObjectFile::getSymbolSection(DataRefImpl Symb) const {
- llvm_unreachable("not yet implemented");
- return errorCodeToError(object_error::invalid_symbol_index);
+ DataRefImpl Ref;
+ Ref.d.a = getWasmSymbol(Symb).Section;
+ return section_iterator(SectionRef(Ref, this));
}
void WasmObjectFile::moveSectionNext(DataRefImpl &Sec) const { Sec.d.a++; }
diff --git a/lib/ObjectYAML/WasmYAML.cpp b/lib/ObjectYAML/WasmYAML.cpp
index 9b1ff7e5dc16..c5d1b438ee2a 100644
--- a/lib/ObjectYAML/WasmYAML.cpp
+++ b/lib/ObjectYAML/WasmYAML.cpp
@@ -50,7 +50,11 @@ static void commonSectionMapping(IO &IO, WasmYAML::Section &Section) {
static void sectionMapping(IO &IO, WasmYAML::CustomSection &Section) {
commonSectionMapping(IO, Section);
IO.mapRequired("Name", Section.Name);
- IO.mapRequired("Payload", Section.Payload);
+ if (Section.Name == "name") {
+ IO.mapOptional("FunctionNames", Section.FunctionNames);
+ } else {
+ IO.mapRequired("Payload", Section.Payload);
+ }
}
static void sectionMapping(IO &IO, WasmYAML::TypeSection &Section) {
@@ -226,6 +230,12 @@ void MappingTraits<WasmYAML::Relocation>::mapping(
IO.mapOptional("Addend", Relocation.Addend, 0);
}
+void MappingTraits<WasmYAML::NameEntry>::mapping(
+ IO &IO, WasmYAML::NameEntry &NameEntry) {
+ IO.mapRequired("Index", NameEntry.Index);
+ IO.mapRequired("Name", NameEntry.Name);
+}
+
void MappingTraits<WasmYAML::LocalDecl>::mapping(
IO &IO, WasmYAML::LocalDecl &LocalDecl) {
IO.mapRequired("Type", LocalDecl.Type);
diff --git a/lib/Passes/PassBuilder.cpp b/lib/Passes/PassBuilder.cpp
index 8db65f7f0e82..7076e751071d 100644
--- a/lib/Passes/PassBuilder.cpp
+++ b/lib/Passes/PassBuilder.cpp
@@ -505,6 +505,10 @@ PassBuilder::buildPerModuleDefaultPipeline(OptimizationLevel Level,
// the CGSCC pipeline.
MPM.addPass(RequireAnalysisPass<GlobalsAA, Module>());
+ // Require the ProfileSummaryAnalysis for the module so we can query it within
+ // the inliner pass.
+ MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
+
// Now begin the main postorder CGSCC pipeline.
// FIXME: The current CGSCC pipeline has its origins in the legacy pass
// manager and trying to emulate its precise behavior. Much of this doesn't
diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp
index fa81b28cd083..caa0691f9205 100644
--- a/lib/Support/APInt.cpp
+++ b/lib/Support/APInt.cpp
@@ -225,114 +225,17 @@ APInt& APInt::operator-=(uint64_t RHS) {
return clearUnusedBits();
}
-/// Multiplies an integer array, x, by a uint64_t integer and places the result
-/// into dest.
-/// @returns the carry out of the multiplication.
-/// @brief Multiply a multi-digit APInt by a single digit (64-bit) integer.
-static uint64_t mul_1(uint64_t dest[], uint64_t x[], unsigned len, uint64_t y) {
- // Split y into high 32-bit part (hy) and low 32-bit part (ly)
- uint64_t ly = y & 0xffffffffULL, hy = y >> 32;
- uint64_t carry = 0;
-
- // For each digit of x.
- for (unsigned i = 0; i < len; ++i) {
- // Split x into high and low words
- uint64_t lx = x[i] & 0xffffffffULL;
- uint64_t hx = x[i] >> 32;
- // hasCarry - A flag to indicate if there is a carry to the next digit.
- // hasCarry == 0, no carry
- // hasCarry == 1, has carry
- // hasCarry == 2, no carry and the calculation result == 0.
- uint8_t hasCarry = 0;
- dest[i] = carry + lx * ly;
- // Determine if the add above introduces carry.
- hasCarry = (dest[i] < carry) ? 1 : 0;
- carry = hx * ly + (dest[i] >> 32) + (hasCarry ? (1ULL << 32) : 0);
- // The upper limit of carry can be (2^32 - 1)(2^32 - 1) +
- // (2^32 - 1) + 2^32 = 2^64.
- hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0);
-
- carry += (lx * hy) & 0xffffffffULL;
- dest[i] = (carry << 32) | (dest[i] & 0xffffffffULL);
- carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0) +
- (carry >> 32) + ((lx * hy) >> 32) + hx * hy;
- }
- return carry;
-}
-
-/// Multiplies integer array x by integer array y and stores the result into
-/// the integer array dest. Note that dest's size must be >= xlen + ylen.
-/// @brief Generalized multiplication of integer arrays.
-static void mul(uint64_t dest[], uint64_t x[], unsigned xlen, uint64_t y[],
- unsigned ylen) {
- dest[xlen] = mul_1(dest, x, xlen, y[0]);
- for (unsigned i = 1; i < ylen; ++i) {
- uint64_t ly = y[i] & 0xffffffffULL, hy = y[i] >> 32;
- uint64_t carry = 0, lx = 0, hx = 0;
- for (unsigned j = 0; j < xlen; ++j) {
- lx = x[j] & 0xffffffffULL;
- hx = x[j] >> 32;
- // hasCarry - A flag to indicate if has carry.
- // hasCarry == 0, no carry
- // hasCarry == 1, has carry
- // hasCarry == 2, no carry and the calculation result == 0.
- uint8_t hasCarry = 0;
- uint64_t resul = carry + lx * ly;
- hasCarry = (resul < carry) ? 1 : 0;
- carry = (hasCarry ? (1ULL << 32) : 0) + hx * ly + (resul >> 32);
- hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0);
-
- carry += (lx * hy) & 0xffffffffULL;
- resul = (carry << 32) | (resul & 0xffffffffULL);
- dest[i+j] += resul;
- carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0)+
- (carry >> 32) + (dest[i+j] < resul ? 1 : 0) +
- ((lx * hy) >> 32) + hx * hy;
- }
- dest[i+xlen] = carry;
- }
-}
-
-APInt& APInt::operator*=(const APInt& RHS) {
+APInt APInt::operator*(const APInt& RHS) const {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
- if (isSingleWord()) {
- U.VAL *= RHS.U.VAL;
- clearUnusedBits();
- return *this;
- }
-
- // Get some bit facts about LHS and check for zero
- unsigned lhsBits = getActiveBits();
- unsigned lhsWords = !lhsBits ? 0 : whichWord(lhsBits - 1) + 1;
- if (!lhsWords)
- // 0 * X ===> 0
- return *this;
-
- // Get some bit facts about RHS and check for zero
- unsigned rhsBits = RHS.getActiveBits();
- unsigned rhsWords = !rhsBits ? 0 : whichWord(rhsBits - 1) + 1;
- if (!rhsWords) {
- // X * 0 ===> 0
- clearAllBits();
- return *this;
- }
-
- // Allocate space for the result
- unsigned destWords = rhsWords + lhsWords;
- uint64_t *dest = getMemory(destWords);
+ if (isSingleWord())
+ return APInt(BitWidth, U.VAL * RHS.U.VAL);
- // Perform the long multiply
- mul(dest, U.pVal, lhsWords, RHS.U.pVal, rhsWords);
+ APInt Result(getMemory(getNumWords()), getBitWidth());
- // Copy result back into *this
- clearAllBits();
- unsigned wordsToCopy = destWords >= getNumWords() ? getNumWords() : destWords;
- memcpy(U.pVal, dest, wordsToCopy * APINT_WORD_SIZE);
- clearUnusedBits();
+ tcMultiply(Result.U.pVal, U.pVal, RHS.U.pVal, getNumWords());
- // delete dest array and return
- delete[] dest;
- return *this;
+ Result.clearUnusedBits();
+ return Result;
}
void APInt::AndAssignSlowCase(const APInt& RHS) {
@@ -347,13 +250,20 @@ void APInt::XorAssignSlowCase(const APInt& RHS) {
tcXor(U.pVal, RHS.U.pVal, getNumWords());
}
-APInt APInt::operator*(const APInt& RHS) const {
+APInt& APInt::operator*=(const APInt& RHS) {
assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
- if (isSingleWord())
- return APInt(BitWidth, U.VAL * RHS.U.VAL);
- APInt Result(*this);
- Result *= RHS;
- return Result;
+ *this = *this * RHS;
+ return *this;
+}
+
+APInt& APInt::operator*=(uint64_t RHS) {
+ if (isSingleWord()) {
+ U.VAL *= RHS;
+ } else {
+ unsigned NumWords = getNumWords();
+ tcMultiplyPart(U.pVal, U.pVal, RHS, 0, NumWords, NumWords, false);
+ }
+ return clearUnusedBits();
}
bool APInt::EqualSlowCase(const APInt& RHS) const {
@@ -1932,10 +1842,6 @@ void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
// Figure out if we can shift instead of multiply
unsigned shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 1 : 0);
- // Set up an APInt for the radix multiplier outside the loop so we don't
- // constantly construct/destruct it.
- APInt apradix(getBitWidth(), radix);
-
// Enter digit traversal loop
for (StringRef::iterator e = str.end(); p != e; ++p) {
unsigned digit = getDigit(*p, radix);
@@ -1946,7 +1852,7 @@ void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
if (shift)
*this <<= shift;
else
- *this *= apradix;
+ *this *= radix;
}
// Add in the digit we just interpreted
@@ -2346,10 +2252,9 @@ int APInt::tcMultiplyPart(WordType *dst, const WordType *src,
assert(dstParts <= srcParts + 1);
/* N loops; minimum of dstParts and srcParts. */
- unsigned n = dstParts < srcParts ? dstParts: srcParts;
+ unsigned n = std::min(dstParts, srcParts);
- unsigned i;
- for (i = 0; i < n; i++) {
+ for (unsigned i = 0; i < n; i++) {
WordType low, mid, high, srcPart;
/* [ LOW, HIGH ] = MULTIPLIER * SRC[i] + DST[i] + CARRY.
@@ -2400,27 +2305,27 @@ int APInt::tcMultiplyPart(WordType *dst, const WordType *src,
carry = high;
}
- if (i < dstParts) {
+ if (srcParts < dstParts) {
/* Full multiplication, there is no overflow. */
- assert(i + 1 == dstParts);
- dst[i] = carry;
- return 0;
- } else {
- /* We overflowed if there is carry. */
- if (carry)
- return 1;
-
- /* We would overflow if any significant unwritten parts would be
- non-zero. This is true if any remaining src parts are non-zero
- and the multiplier is non-zero. */
- if (multiplier)
- for (; i < srcParts; i++)
- if (src[i])
- return 1;
-
- /* We fitted in the narrow destination. */
+ assert(srcParts + 1 == dstParts);
+ dst[srcParts] = carry;
return 0;
}
+
+ /* We overflowed if there is carry. */
+ if (carry)
+ return 1;
+
+ /* We would overflow if any significant unwritten parts would be
+ non-zero. This is true if any remaining src parts are non-zero
+ and the multiplier is non-zero. */
+ if (multiplier)
+ for (unsigned i = dstParts; i < srcParts; i++)
+ if (src[i])
+ return 1;
+
+ /* We fitted in the narrow destination. */
+ return 0;
}
/* DST = LHS * RHS, where DST has the same width as the operands and
@@ -2449,20 +2354,19 @@ unsigned APInt::tcFullMultiply(WordType *dst, const WordType *lhs,
const WordType *rhs, unsigned lhsParts,
unsigned rhsParts) {
/* Put the narrower number on the LHS for less loops below. */
- if (lhsParts > rhsParts) {
+ if (lhsParts > rhsParts)
return tcFullMultiply (dst, rhs, lhs, rhsParts, lhsParts);
- } else {
- assert(dst != lhs && dst != rhs);
- tcSet(dst, 0, rhsParts);
+ assert(dst != lhs && dst != rhs);
- for (unsigned i = 0; i < lhsParts; i++)
- tcMultiplyPart(&dst[i], rhs, lhs[i], 0, rhsParts, rhsParts + 1, true);
+ tcSet(dst, 0, rhsParts);
- unsigned n = lhsParts + rhsParts;
+ for (unsigned i = 0; i < lhsParts; i++)
+ tcMultiplyPart(&dst[i], rhs, lhs[i], 0, rhsParts, rhsParts + 1, true);
- return n - (dst[n - 1] == 0);
- }
+ unsigned n = lhsParts + rhsParts;
+
+ return n - (dst[n - 1] == 0);
}
/* If RHS is zero LHS and REMAINDER are left unchanged, return one.
diff --git a/lib/Support/TargetParser.cpp b/lib/Support/TargetParser.cpp
index bba7c6d0d604..b16351906a4c 100644
--- a/lib/Support/TargetParser.cpp
+++ b/lib/Support/TargetParser.cpp
@@ -422,8 +422,10 @@ unsigned llvm::AArch64::getDefaultExtensions(StringRef CPU, unsigned ArchKind) {
return AArch64ARCHNames[ArchKind].ArchBaseExtensions;
return StringSwitch<unsigned>(CPU)
-#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
- .Case(NAME, DEFAULT_EXT)
+#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT) \
+ .Case(NAME, \
+ AArch64ARCHNames[(unsigned)AArch64::ArchKind::ID].ArchBaseExtensions | \
+ DEFAULT_EXT)
#include "llvm/Support/AArch64TargetParser.def"
.Default(AArch64::AEK_INVALID);
}
diff --git a/lib/Support/Unix/DynamicLibrary.inc b/lib/Support/Unix/DynamicLibrary.inc
index a0110e7044ee..a0526fa2c1b8 100644
--- a/lib/Support/Unix/DynamicLibrary.inc
+++ b/lib/Support/Unix/DynamicLibrary.inc
@@ -31,7 +31,7 @@ void *DynamicLibrary::HandleSet::DLOpen(const char *File, std::string *Err) {
#ifdef __CYGWIN__
// Cygwin searches symbols only in the main
// with the handle of dlopen(NULL, RTLD_GLOBAL).
- if (!Filename)
+ if (!File)
Handle = RTLD_DEFAULT;
#endif
diff --git a/lib/Support/Unix/Path.inc b/lib/Support/Unix/Path.inc
index 93f8982196b3..fa28ba1b6ab6 100644
--- a/lib/Support/Unix/Path.inc
+++ b/lib/Support/Unix/Path.inc
@@ -421,14 +421,15 @@ std::error_code resize_file(int FD, uint64_t Size) {
#if defined(HAVE_POSIX_FALLOCATE)
// If we have posix_fallocate use it. Unlike ftruncate it always allocates
// space, so we get an error if the disk is full.
- if (int Err = ::posix_fallocate(FD, 0, Size))
- return std::error_code(Err, std::generic_category());
-#else
+ if (int Err = ::posix_fallocate(FD, 0, Size)) {
+ if (Err != EOPNOTSUPP)
+ return std::error_code(Err, std::generic_category());
+ }
+#endif
// Use ftruncate as a fallback. It may or may not allocate space. At least on
// OS X with HFS+ it does.
if (::ftruncate(FD, Size) == -1)
return std::error_code(errno, std::generic_category());
-#endif
return std::error_code();
}
diff --git a/lib/Target/AArch64/AArch64.h b/lib/Target/AArch64/AArch64.h
index b44b13e36e15..3e0e3978b90b 100644
--- a/lib/Target/AArch64/AArch64.h
+++ b/lib/Target/AArch64/AArch64.h
@@ -41,7 +41,6 @@ FunctionPass *createAArch64LoadStoreOptimizationPass();
FunctionPass *createAArch64VectorByElementOptPass();
ModulePass *createAArch64PromoteConstantPass();
FunctionPass *createAArch64ConditionOptimizerPass();
-FunctionPass *createAArch64AddressTypePromotionPass();
FunctionPass *createAArch64A57FPLoadBalancing();
FunctionPass *createAArch64A53Fix835769();
@@ -54,7 +53,6 @@ createAArch64InstructionSelector(const AArch64TargetMachine &,
void initializeAArch64A53Fix835769Pass(PassRegistry&);
void initializeAArch64A57FPLoadBalancingPass(PassRegistry&);
-void initializeAArch64AddressTypePromotionPass(PassRegistry&);
void initializeAArch64AdvSIMDScalarPass(PassRegistry&);
void initializeAArch64CollectLOHPass(PassRegistry&);
void initializeAArch64ConditionalComparesPass(PassRegistry&);
diff --git a/lib/Target/AArch64/AArch64.td b/lib/Target/AArch64/AArch64.td
index 519ca2894683..73f2b6a25f66 100644
--- a/lib/Target/AArch64/AArch64.td
+++ b/lib/Target/AArch64/AArch64.td
@@ -358,7 +358,6 @@ def ProcThunderXT83 : SubtargetFeature<"thunderxt83", "ARMProcFamily",
FeatureNEON]>;
def : ProcessorModel<"generic", NoSchedModel, [
- FeatureCRC,
FeatureFPARMv8,
FeatureNEON,
FeaturePerfMon,
diff --git a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
deleted file mode 100644
index e1b8ee6d03c3..000000000000
--- a/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ /dev/null
@@ -1,493 +0,0 @@
-//===-- AArch64AddressTypePromotion.cpp --- Promote type for addr accesses -==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass tries to promote the computations use to obtained a sign extended
-// value used into memory accesses.
-// E.g.
-// a = add nsw i32 b, 3
-// d = sext i32 a to i64
-// e = getelementptr ..., i64 d
-//
-// =>
-// f = sext i32 b to i64
-// a = add nsw i64 f, 3
-// e = getelementptr ..., i64 a
-//
-// This is legal to do if the computations are marked with either nsw or nuw
-// markers. Moreover, the current heuristic is simple: it does not create new
-// sext operations, i.e., it gives up when a sext would have forked (e.g., if a
-// = add i32 b, c, two sexts are required to promote the computation).
-//
-// FIXME: This pass may be useful for other targets too.
-// ===---------------------------------------------------------------------===//
-
-#include "AArch64.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/InstrTypes.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Operator.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Use.h"
-#include "llvm/IR/User.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cassert>
-
-using namespace llvm;
-
-#define DEBUG_TYPE "aarch64-type-promotion"
-
-static cl::opt<bool>
-EnableMerge("aarch64-type-promotion-merge", cl::Hidden,
- cl::desc("Enable merging of redundant sexts when one is dominating"
- " the other."),
- cl::init(true));
-
-#define AARCH64_TYPE_PROMO_NAME "AArch64 Address Type Promotion"
-
-//===----------------------------------------------------------------------===//
-// AArch64AddressTypePromotion
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AArch64AddressTypePromotion : public FunctionPass {
-public:
- static char ID;
-
- AArch64AddressTypePromotion() : FunctionPass(ID) {
- initializeAArch64AddressTypePromotionPass(*PassRegistry::getPassRegistry());
- }
-
- StringRef getPassName() const override { return AARCH64_TYPE_PROMO_NAME; }
-
- /// Iterate over the functions and promote the computation of interesting
- // sext instructions.
- bool runOnFunction(Function &F) override;
-
-private:
- /// The current function.
- Function *Func = nullptr;
-
- /// Filter out all sexts that does not have this type.
- /// Currently initialized with Int64Ty.
- Type *ConsideredSExtType = nullptr;
-
- // This transformation requires dominator info.
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
- AU.addRequired<DominatorTreeWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- FunctionPass::getAnalysisUsage(AU);
- }
-
- typedef SmallPtrSet<Instruction *, 32> SetOfInstructions;
- typedef SmallVector<Instruction *, 16> Instructions;
- typedef DenseMap<Value *, Instructions> ValueToInsts;
-
- /// Check if it is profitable to move a sext through this instruction.
- /// Currently, we consider it is profitable if:
- /// - Inst is used only once (no need to insert truncate).
- /// - Inst has only one operand that will require a sext operation (we do
- /// do not create new sext operation).
- bool shouldGetThrough(const Instruction *Inst);
-
- /// Check if it is possible and legal to move a sext through this
- /// instruction.
- /// Current heuristic considers that we can get through:
- /// - Arithmetic operation marked with the nsw or nuw flag.
- /// - Other sext operation.
- /// - Truncate operation if it was just dropping sign extended bits.
- bool canGetThrough(const Instruction *Inst);
-
- /// Move sext operations through safe to sext instructions.
- bool propagateSignExtension(Instructions &SExtInsts);
-
- /// Is this sext should be considered for code motion.
- /// We look for sext with ConsideredSExtType and uses in at least one
- // GetElementPtrInst.
- bool shouldConsiderSExt(const Instruction *SExt) const;
-
- /// Collect all interesting sext operations, i.e., the ones with the right
- /// type and used in memory accesses.
- /// More precisely, a sext instruction is considered as interesting if it
- /// is used in a "complex" getelementptr or it exits at least another
- /// sext instruction that sign extended the same initial value.
- /// A getelementptr is considered as "complex" if it has more than 2
- // operands.
- void analyzeSExtension(Instructions &SExtInsts);
-
- /// Merge redundant sign extension operations in common dominator.
- void mergeSExts(ValueToInsts &ValToSExtendedUses,
- SetOfInstructions &ToRemove);
-};
-
-} // end anonymous namespace
-
-char AArch64AddressTypePromotion::ID = 0;
-
-INITIALIZE_PASS_BEGIN(AArch64AddressTypePromotion, "aarch64-type-promotion",
- AARCH64_TYPE_PROMO_NAME, false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_END(AArch64AddressTypePromotion, "aarch64-type-promotion",
- AARCH64_TYPE_PROMO_NAME, false, false)
-
-FunctionPass *llvm::createAArch64AddressTypePromotionPass() {
- return new AArch64AddressTypePromotion();
-}
-
-bool AArch64AddressTypePromotion::canGetThrough(const Instruction *Inst) {
- if (isa<SExtInst>(Inst))
- return true;
-
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
- if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
- (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
- return true;
-
- // sext(trunc(sext)) --> sext
- if (isa<TruncInst>(Inst) && isa<SExtInst>(Inst->getOperand(0))) {
- const Instruction *Opnd = cast<Instruction>(Inst->getOperand(0));
- // Check that the truncate just drop sign extended bits.
- if (Inst->getType()->getIntegerBitWidth() >=
- Opnd->getOperand(0)->getType()->getIntegerBitWidth() &&
- Inst->getOperand(0)->getType()->getIntegerBitWidth() <=
- ConsideredSExtType->getIntegerBitWidth())
- return true;
- }
-
- return false;
-}
-
-bool AArch64AddressTypePromotion::shouldGetThrough(const Instruction *Inst) {
- // If the type of the sext is the same as the considered one, this sext
- // will become useless.
- // Otherwise, we will have to do something to preserve the original value,
- // unless it is used once.
- if (isa<SExtInst>(Inst) &&
- (Inst->getType() == ConsideredSExtType || Inst->hasOneUse()))
- return true;
-
- // If the Inst is used more that once, we may need to insert truncate
- // operations and we don't do that at the moment.
- if (!Inst->hasOneUse())
- return false;
-
- // This truncate is used only once, thus if we can get thourgh, it will become
- // useless.
- if (isa<TruncInst>(Inst))
- return true;
-
- // If both operands are not constant, a new sext will be created here.
- // Current heuristic is: each step should be profitable.
- // Therefore we don't allow to increase the number of sext even if it may
- // be profitable later on.
- if (isa<BinaryOperator>(Inst) && isa<ConstantInt>(Inst->getOperand(1)))
- return true;
-
- return false;
-}
-
-static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
- return !(isa<SelectInst>(Inst) && OpIdx == 0);
-}
-
-bool
-AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
- if (SExt->getType() != ConsideredSExtType)
- return false;
-
- for (const User *U : SExt->users()) {
- if (isa<GetElementPtrInst>(U))
- return true;
- }
-
- return false;
-}
-
-// Input:
-// - SExtInsts contains all the sext instructions that are used directly in
-// GetElementPtrInst, i.e., access to memory.
-// Algorithm:
-// - For each sext operation in SExtInsts:
-// Let var be the operand of sext.
-// while it is profitable (see shouldGetThrough), legal, and safe
-// (see canGetThrough) to move sext through var's definition:
-// * promote the type of var's definition.
-// * fold var into sext uses.
-// * move sext above var's definition.
-// * update sext operand to use the operand of var that should be sign
-// extended (by construction there is only one).
-//
-// E.g.,
-// a = ... i32 c, 3
-// b = sext i32 a to i64 <- is it legal/safe/profitable to get through 'a'
-// ...
-// = b
-// => Yes, update the code
-// b = sext i32 c to i64
-// a = ... i64 b, 3
-// ...
-// = a
-// Iterate on 'c'.
-bool
-AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
- DEBUG(dbgs() << "*** Propagate Sign Extension ***\n");
-
- bool LocalChange = false;
- SetOfInstructions ToRemove;
- ValueToInsts ValToSExtendedUses;
- while (!SExtInsts.empty()) {
- // Get through simple chain.
- Instruction *SExt = SExtInsts.pop_back_val();
-
- DEBUG(dbgs() << "Consider:\n" << *SExt << '\n');
-
- // If this SExt has already been merged continue.
- if (SExt->use_empty() && ToRemove.count(SExt)) {
- DEBUG(dbgs() << "No uses => marked as delete\n");
- continue;
- }
-
- // Now try to get through the chain of definitions.
- while (auto *Inst = dyn_cast<Instruction>(SExt->getOperand(0))) {
- DEBUG(dbgs() << "Try to get through:\n" << *Inst << '\n');
- if (!canGetThrough(Inst) || !shouldGetThrough(Inst)) {
- // We cannot get through something that is not an Instruction
- // or not safe to SExt.
- DEBUG(dbgs() << "Cannot get through\n");
- break;
- }
-
- LocalChange = true;
- // If this is a sign extend, it becomes useless.
- if (isa<SExtInst>(Inst) || isa<TruncInst>(Inst)) {
- DEBUG(dbgs() << "SExt or trunc, mark it as to remove\n");
- // We cannot use replaceAllUsesWith here because we may trigger some
- // assertion on the type as all involved sext operation may have not
- // been moved yet.
- while (!Inst->use_empty()) {
- Use &U = *Inst->use_begin();
- Instruction *User = dyn_cast<Instruction>(U.getUser());
- assert(User && "User of sext is not an Instruction!");
- User->setOperand(U.getOperandNo(), SExt);
- }
- ToRemove.insert(Inst);
- SExt->setOperand(0, Inst->getOperand(0));
- SExt->moveBefore(Inst);
- continue;
- }
-
- // Get through the Instruction:
- // 1. Update its type.
- // 2. Replace the uses of SExt by Inst.
- // 3. Sign extend each operand that needs to be sign extended.
-
- // Step #1.
- Inst->mutateType(SExt->getType());
- // Step #2.
- SExt->replaceAllUsesWith(Inst);
- // Step #3.
- Instruction *SExtForOpnd = SExt;
-
- DEBUG(dbgs() << "Propagate SExt to operands\n");
- for (int OpIdx = 0, EndOpIdx = Inst->getNumOperands(); OpIdx != EndOpIdx;
- ++OpIdx) {
- DEBUG(dbgs() << "Operand:\n" << *(Inst->getOperand(OpIdx)) << '\n');
- if (Inst->getOperand(OpIdx)->getType() == SExt->getType() ||
- !shouldSExtOperand(Inst, OpIdx)) {
- DEBUG(dbgs() << "No need to propagate\n");
- continue;
- }
- // Check if we can statically sign extend the operand.
- Value *Opnd = Inst->getOperand(OpIdx);
- if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
- DEBUG(dbgs() << "Statically sign extend\n");
- Inst->setOperand(OpIdx, ConstantInt::getSigned(SExt->getType(),
- Cst->getSExtValue()));
- continue;
- }
- // UndefValue are typed, so we have to statically sign extend them.
- if (isa<UndefValue>(Opnd)) {
- DEBUG(dbgs() << "Statically sign extend\n");
- Inst->setOperand(OpIdx, UndefValue::get(SExt->getType()));
- continue;
- }
-
- // Otherwise we have to explicity sign extend it.
- assert(SExtForOpnd &&
- "Only one operand should have been sign extended");
-
- SExtForOpnd->setOperand(0, Opnd);
-
- DEBUG(dbgs() << "Move before:\n" << *Inst << "\nSign extend\n");
- // Move the sign extension before the insertion point.
- SExtForOpnd->moveBefore(Inst);
- Inst->setOperand(OpIdx, SExtForOpnd);
- // If more sext are required, new instructions will have to be created.
- SExtForOpnd = nullptr;
- }
- if (SExtForOpnd == SExt) {
- DEBUG(dbgs() << "Sign extension is useless now\n");
- ToRemove.insert(SExt);
- break;
- }
- }
-
- // If the use is already of the right type, connect its uses to its argument
- // and delete it.
- // This can happen for an Instruction all uses of which are sign extended.
- if (!ToRemove.count(SExt) &&
- SExt->getType() == SExt->getOperand(0)->getType()) {
- DEBUG(dbgs() << "Sign extension is useless, attach its use to "
- "its argument\n");
- SExt->replaceAllUsesWith(SExt->getOperand(0));
- ToRemove.insert(SExt);
- } else
- ValToSExtendedUses[SExt->getOperand(0)].push_back(SExt);
- }
-
- if (EnableMerge)
- mergeSExts(ValToSExtendedUses, ToRemove);
-
- // Remove all instructions marked as ToRemove.
- for (Instruction *I: ToRemove)
- I->eraseFromParent();
- return LocalChange;
-}
-
-void AArch64AddressTypePromotion::mergeSExts(ValueToInsts &ValToSExtendedUses,
- SetOfInstructions &ToRemove) {
- DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-
- for (auto &Entry : ValToSExtendedUses) {
- Instructions &Insts = Entry.second;
- Instructions CurPts;
- for (Instruction *Inst : Insts) {
- if (ToRemove.count(Inst))
- continue;
- bool inserted = false;
- for (auto &Pt : CurPts) {
- if (DT.dominates(Inst, Pt)) {
- DEBUG(dbgs() << "Replace all uses of:\n" << *Pt << "\nwith:\n"
- << *Inst << '\n');
- Pt->replaceAllUsesWith(Inst);
- ToRemove.insert(Pt);
- Pt = Inst;
- inserted = true;
- break;
- }
- if (!DT.dominates(Pt, Inst))
- // Give up if we need to merge in a common dominator as the
- // expermients show it is not profitable.
- continue;
-
- DEBUG(dbgs() << "Replace all uses of:\n" << *Inst << "\nwith:\n"
- << *Pt << '\n');
- Inst->replaceAllUsesWith(Pt);
- ToRemove.insert(Inst);
- inserted = true;
- break;
- }
- if (!inserted)
- CurPts.push_back(Inst);
- }
- }
-}
-
-void AArch64AddressTypePromotion::analyzeSExtension(Instructions &SExtInsts) {
- DEBUG(dbgs() << "*** Analyze Sign Extensions ***\n");
-
- DenseMap<Value *, Instruction *> SeenChains;
-
- for (auto &BB : *Func) {
- for (auto &II : BB) {
- Instruction *SExt = &II;
-
- // Collect all sext operation per type.
- if (!isa<SExtInst>(SExt) || !shouldConsiderSExt(SExt))
- continue;
-
- DEBUG(dbgs() << "Found:\n" << (*SExt) << '\n');
-
- // Cases where we actually perform the optimization:
- // 1. SExt is used in a getelementptr with more than 2 operand =>
- // likely we can merge some computation if they are done on 64 bits.
- // 2. The beginning of the SExt chain is SExt several time. =>
- // code sharing is possible.
-
- bool insert = false;
- // #1.
- for (const User *U : SExt->users()) {
- const Instruction *Inst = dyn_cast<GetElementPtrInst>(U);
- if (Inst && Inst->getNumOperands() > 2) {
- DEBUG(dbgs() << "Interesting use in GetElementPtrInst\n" << *Inst
- << '\n');
- insert = true;
- break;
- }
- }
-
- // #2.
- // Check the head of the chain.
- Instruction *Inst = SExt;
- Value *Last;
- do {
- int OpdIdx = 0;
- const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
- if (BinOp && isa<ConstantInt>(BinOp->getOperand(0)))
- OpdIdx = 1;
- Last = Inst->getOperand(OpdIdx);
- Inst = dyn_cast<Instruction>(Last);
- } while (Inst && canGetThrough(Inst) && shouldGetThrough(Inst));
-
- DEBUG(dbgs() << "Head of the chain:\n" << *Last << '\n');
- DenseMap<Value *, Instruction *>::iterator AlreadySeen =
- SeenChains.find(Last);
- if (insert || AlreadySeen != SeenChains.end()) {
- DEBUG(dbgs() << "Insert\n");
- SExtInsts.push_back(SExt);
- if (AlreadySeen != SeenChains.end() && AlreadySeen->second != nullptr) {
- DEBUG(dbgs() << "Insert chain member\n");
- SExtInsts.push_back(AlreadySeen->second);
- SeenChains[Last] = nullptr;
- }
- } else {
- DEBUG(dbgs() << "Record its chain membership\n");
- SeenChains[Last] = SExt;
- }
- }
- }
-}
-
-bool AArch64AddressTypePromotion::runOnFunction(Function &F) {
- if (skipFunction(F))
- return false;
-
- if (F.isDeclaration())
- return false;
- Func = &F;
- ConsideredSExtType = Type::getInt64Ty(Func->getContext());
-
- DEBUG(dbgs() << "*** " << getPassName() << ": " << Func->getName() << '\n');
-
- Instructions SExtInsts;
- analyzeSExtension(SExtInsts);
- return propagateSignExtension(SExtInsts);
-}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index eb1bbcafe6e6..4b1bb27dce73 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -758,6 +758,9 @@ void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
setOperationAction(ISD::FP_TO_SINT, VT, Custom);
setOperationAction(ISD::FP_TO_UINT, VT, Custom);
+ if (!VT.isFloatingPoint())
+ setOperationAction(ISD::ABS, VT, Legal);
+
// [SU][MIN|MAX] are available for all NEON types apart from i64.
if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
@@ -2482,6 +2485,9 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
}
+ case Intrinsic::aarch64_neon_abs:
+ return DAG.getNode(ISD::ABS, dl, Op.getValueType(),
+ Op.getOperand(1));
case Intrinsic::aarch64_neon_smax:
return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index ce401206e517..902b08844216 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2734,60 +2734,36 @@ defm FMOV : FPMoveImmediate<"fmov">;
defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
int_aarch64_neon_uabd>;
// Match UABDL in log2-shuffle patterns.
+def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
+ (zext (v8i8 V64:$opB))))),
+ (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
(v8i16 (add (sub (zext (v8i8 V64:$opA)),
(zext (v8i8 V64:$opB))),
(AArch64vashr v8i16:$src, (i32 15))))),
(UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
+def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
+ (zext (extract_high_v16i8 V128:$opB))))),
+ (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
(v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
(zext (extract_high_v16i8 V128:$opB))),
(AArch64vashr v8i16:$src, (i32 15))))),
(UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
- (v4i32 (add (sub (zext (v4i16 V64:$opA)),
- (zext (v4i16 V64:$opB))),
- (AArch64vashr v4i32:$src, (i32 31))))),
+def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
+ (zext (v4i16 V64:$opB))))),
(UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
- (v4i32 (add (sub (zext (extract_high_v8i16 V128:$opA)),
- (zext (extract_high_v8i16 V128:$opB))),
- (AArch64vashr v4i32:$src, (i32 31))))),
+def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
+ (zext (extract_high_v8i16 V128:$opB))))),
(UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
- (v2i64 (add (sub (zext (v2i32 V64:$opA)),
- (zext (v2i32 V64:$opB))),
- (AArch64vashr v2i64:$src, (i32 63))))),
+def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
+ (zext (v2i32 V64:$opB))))),
(UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
- (v2i64 (add (sub (zext (extract_high_v4i32 V128:$opA)),
- (zext (extract_high_v4i32 V128:$opB))),
- (AArch64vashr v2i64:$src, (i32 63))))),
+def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
+ (zext (extract_high_v4i32 V128:$opB))))),
(UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
-defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_aarch64_neon_abs>;
-def : Pat<(xor (v8i8 (AArch64vashr V64:$src, (i32 7))),
- (v8i8 (add V64:$src, (AArch64vashr V64:$src, (i32 7))))),
- (ABSv8i8 V64:$src)>;
-def : Pat<(xor (v4i16 (AArch64vashr V64:$src, (i32 15))),
- (v4i16 (add V64:$src, (AArch64vashr V64:$src, (i32 15))))),
- (ABSv4i16 V64:$src)>;
-def : Pat<(xor (v2i32 (AArch64vashr V64:$src, (i32 31))),
- (v2i32 (add V64:$src, (AArch64vashr V64:$src, (i32 31))))),
- (ABSv2i32 V64:$src)>;
-def : Pat<(xor (v16i8 (AArch64vashr V128:$src, (i32 7))),
- (v16i8 (add V128:$src, (AArch64vashr V128:$src, (i32 7))))),
- (ABSv16i8 V128:$src)>;
-def : Pat<(xor (v8i16 (AArch64vashr V128:$src, (i32 15))),
- (v8i16 (add V128:$src, (AArch64vashr V128:$src, (i32 15))))),
- (ABSv8i16 V128:$src)>;
-def : Pat<(xor (v4i32 (AArch64vashr V128:$src, (i32 31))),
- (v4i32 (add V128:$src, (AArch64vashr V128:$src, (i32 31))))),
- (ABSv4i32 V128:$src)>;
-def : Pat<(xor (v2i64 (AArch64vashr V128:$src, (i32 63))),
- (v2i64 (add V128:$src, (AArch64vashr V128:$src, (i32 63))))),
- (ABSv2i64 V128:$src)>;
-
+defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
@@ -3359,7 +3335,7 @@ def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//
-defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", int_aarch64_neon_abs>;
+defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", abs>;
defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index 6f9021c4a030..5f895903da6f 100644
--- a/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -260,15 +260,15 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
if (MI.getNumOperands() != 3)
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1, getValueMapping(PMI_FirstGPR, Size),
/*NumOperands*/ 3);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1, getValueMapping(PMI_FirstFPR, Size),
/*NumOperands*/ 3);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
return AltMappings;
}
case TargetOpcode::G_BITCAST: {
@@ -282,29 +282,29 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1,
getCopyMapping(AArch64::GPRRegBankID, AArch64::GPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1,
getCopyMapping(AArch64::FPRRegBankID, AArch64::FPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping GPRToFPRMapping(
+ const InstructionMapping &GPRToFPRMapping = getInstructionMapping(
/*ID*/ 3,
/*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
/*NumOperands*/ 2);
- InstructionMapping FPRToGPRMapping(
+ const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
/*ID*/ 3,
/*Cost*/ copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank, Size),
getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
/*NumOperands*/ 2);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
- AltMappings.emplace_back(std::move(GPRToFPRMapping));
- AltMappings.emplace_back(std::move(FPRToGPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
+ AltMappings.push_back(&GPRToFPRMapping);
+ AltMappings.push_back(&FPRToGPRMapping);
return AltMappings;
}
case TargetOpcode::G_LOAD: {
@@ -318,21 +318,21 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
break;
InstructionMappings AltMappings;
- InstructionMapping GPRMapping(
+ const InstructionMapping &GPRMapping = getInstructionMapping(
/*ID*/ 1, /*Cost*/ 1,
getOperandsMapping({getValueMapping(PMI_FirstGPR, Size),
// Addresses are GPR 64-bit.
getValueMapping(PMI_FirstGPR, 64)}),
/*NumOperands*/ 2);
- InstructionMapping FPRMapping(
+ const InstructionMapping &FPRMapping = getInstructionMapping(
/*ID*/ 2, /*Cost*/ 1,
getOperandsMapping({getValueMapping(PMI_FirstFPR, Size),
// Addresses are GPR 64-bit.
getValueMapping(PMI_FirstGPR, 64)}),
/*NumOperands*/ 2);
- AltMappings.emplace_back(std::move(GPRMapping));
- AltMappings.emplace_back(std::move(FPRMapping));
+ AltMappings.push_back(&GPRMapping);
+ AltMappings.push_back(&FPRMapping);
return AltMappings;
}
default:
@@ -373,8 +373,9 @@ static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
return false;
}
-RegisterBankInfo::InstructionMapping
-AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
+const RegisterBankInfo::InstructionMapping &
+AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
+ const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -411,11 +412,11 @@ AArch64RegisterBankInfo::getSameKindOfOperandsMapping(const MachineInstr &MI) {
}
#endif // End NDEBUG.
- return InstructionMapping{DefaultMappingID, 1, getValueMapping(RBIdx, Size),
- NumOperands};
+ return getInstructionMapping(DefaultMappingID, 1,
+ getValueMapping(RBIdx, Size), NumOperands);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const unsigned Opc = MI.getOpcode();
const MachineFunction &MF = *MI.getParent()->getParent();
@@ -424,7 +425,8 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping =
+ getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -462,15 +464,15 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
DstIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
const RegisterBank &SrcRB =
SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
- return InstructionMapping{
+ return getInstructionMapping(
DefaultMappingID, copyCost(DstRB, SrcRB, Size),
getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
- /*NumOperands*/ 2};
+ /*NumOperands*/ 2);
}
case TargetOpcode::G_SEQUENCE:
// FIXME: support this, but the generic code is really not going to do
// anything sane.
- return InstructionMapping();
+ return getInvalidInstructionMapping();
default:
break;
}
@@ -533,19 +535,17 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
// Finally construct the computed mapping.
- RegisterBankInfo::InstructionMapping Mapping =
- InstructionMapping{DefaultMappingID, Cost, nullptr, NumOperands};
SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
if (MI.getOperand(Idx).isReg() && MI.getOperand(Idx).getReg()) {
auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);
if (!Mapping->isValid())
- return InstructionMapping();
+ return getInvalidInstructionMapping();
OpdsMapping[Idx] = Mapping;
}
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ return getInstructionMapping(DefaultMappingID, Cost,
+ getOperandsMapping(OpdsMapping), NumOperands);
}
diff --git a/lib/Target/AArch64/AArch64RegisterBankInfo.h b/lib/Target/AArch64/AArch64RegisterBankInfo.h
index 0a795a42c0b1..6d74a47095a9 100644
--- a/lib/Target/AArch64/AArch64RegisterBankInfo.h
+++ b/lib/Target/AArch64/AArch64RegisterBankInfo.h
@@ -98,8 +98,8 @@ class AArch64RegisterBankInfo final : public AArch64GenRegisterBankInfo {
///
/// \return An InstructionMappings with a statically allocated
/// OperandsMapping.
- static InstructionMapping
- getSameKindOfOperandsMapping(const MachineInstr &MI);
+ const InstructionMapping &
+ getSameKindOfOperandsMapping(const MachineInstr &MI) const;
public:
AArch64RegisterBankInfo(const TargetRegisterInfo &TRI);
@@ -113,7 +113,8 @@ public:
InstructionMappings
getInstrAlternativeMappings(const MachineInstr &MI) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index de7108d302dd..5a90fd1eb1ba 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -109,11 +109,6 @@ EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
cl::init(false));
static cl::opt<bool>
- EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden,
- cl::desc("Enable the type promotion pass"),
- cl::init(false));
-
-static cl::opt<bool>
EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
cl::desc("Enable optimizations on complex GEPs"),
cl::init(false));
@@ -146,7 +141,6 @@ extern "C" void LLVMInitializeAArch64Target() {
initializeGlobalISel(*PR);
initializeAArch64A53Fix835769Pass(*PR);
initializeAArch64A57FPLoadBalancingPass(*PR);
- initializeAArch64AddressTypePromotionPass(*PR);
initializeAArch64AdvSIMDScalarPass(*PR);
initializeAArch64CollectLOHPass(*PR);
initializeAArch64ConditionalComparesPass(*PR);
@@ -382,9 +376,6 @@ bool AArch64PassConfig::addPreISel() {
addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
}
- if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion)
- addPass(createAArch64AddressTypePromotionPass());
-
return false;
}
diff --git a/lib/Target/AArch64/CMakeLists.txt b/lib/Target/AArch64/CMakeLists.txt
index 6d0930c358f1..f0f50f29be0f 100644
--- a/lib/Target/AArch64/CMakeLists.txt
+++ b/lib/Target/AArch64/CMakeLists.txt
@@ -39,7 +39,6 @@ endif()
add_llvm_target(AArch64CodeGen
AArch64A57FPLoadBalancing.cpp
- AArch64AddressTypePromotion.cpp
AArch64AdvSIMDScalarPass.cpp
AArch64AsmPrinter.cpp
AArch64CleanupLocalDynamicTLSPass.cpp
diff --git a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 2ce23dbf08e6..f473944cd528 100644
--- a/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -713,7 +713,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
S_00B84C_EXCP_EN_MSB(0) |
- S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks) |
+ // For AMDHSA, LDS_SIZE must be zero, as it is populated by the CP.
+ S_00B84C_LDS_SIZE(STM.isAmdHsaOS() ? 0 : ProgInfo.LDSBlocks) |
S_00B84C_EXCP_EN(0);
}
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 64e1b8f0d7f0..915d1d9e0e68 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3580,7 +3580,7 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
const SDValue Op, KnownBits &Known,
const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits(); // Don't know anything.
+ Known.resetAll(); // Don't know anything.
KnownBits Known2;
unsigned Opc = Op.getOpcode();
diff --git a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index a5edc0c3b937..623b2c88ab8f 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -82,25 +82,28 @@ AMDGPURegisterBankInfo::getInstrAlternativeMappings(
switch (MI.getOpcode()) {
case TargetOpcode::G_LOAD: {
// FIXME: Should we be hard coding the size for these mappings?
- InstructionMapping SSMapping(1, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(SSMapping));
-
- InstructionMapping VVMapping(2, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(VVMapping));
+ const InstructionMapping &SSMapping = getInstructionMapping(
+ 1, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&SSMapping);
+
+ const InstructionMapping &VVMapping = getInstructionMapping(
+ 2, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&VVMapping);
// FIXME: Should this be the pointer-size (64-bits) or the size of the
// register that will hold the bufffer resourc (128-bits).
- InstructionMapping VSMapping(3, 1,
- getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
- AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
- 2); // Num Operands
- AltMappings.emplace_back(std::move(VSMapping));
+ const InstructionMapping &VSMapping = getInstructionMapping(
+ 3, 1, getOperandsMapping(
+ {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
+ AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 64)}),
+ 2); // Num Operands
+ AltMappings.push_back(&VSMapping);
return AltMappings;
@@ -124,13 +127,11 @@ static bool isInstrUniform(const MachineInstr &MI) {
return AMDGPU::isUniformMMO(MMO);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- RegisterBankInfo::InstructionMapping Mapping =
- InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
unsigned PtrSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
@@ -150,32 +151,34 @@ AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {
OpdsMapping[0] = ValMapping;
OpdsMapping[1] = PtrMapping;
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
+ 1, 1, getOperandsMapping(OpdsMapping), MI.getNumOperands());
return Mapping;
// FIXME: Do we want to add a mapping for FLAT load, or should we just
// handle that during instruction selection?
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
- RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
- Mapping = InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());
+ bool IsComplete = true;
switch (MI.getOpcode()) {
- default: break;
+ default:
+ IsComplete = false;
+ break;
case AMDGPU::G_CONSTANT: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_GEP: {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -185,8 +188,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned Size = MRI.getType(MI.getOperand(i).getReg()).getSizeInBits();
OpdsMapping[i] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_STORE: {
assert(MI.getOperand(0).isReg());
@@ -203,28 +205,27 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[0] = ValMapping;
OpdsMapping[1] = PtrMapping;
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
- return Mapping;
+ break;
}
case AMDGPU::G_LOAD:
return getInstrMappingForLoad(MI);
}
- unsigned BankID = AMDGPU::SGPRRegBankID;
-
- Mapping = InstructionMapping{1, 1, nullptr, MI.getNumOperands()};
- unsigned Size = 0;
- for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
- // If the operand is not a register default to the size of the previous
- // operand.
- // FIXME: Can't we pull the types from the MachineInstr rather than the
- // operands.
- if (MI.getOperand(Idx).isReg())
- Size = getSizeInBits(MI.getOperand(Idx).getReg(), MRI, *TRI);
- OpdsMapping.push_back(AMDGPU::getValueMapping(BankID, Size));
+ if (!IsComplete) {
+ unsigned BankID = AMDGPU::SGPRRegBankID;
+
+ unsigned Size = 0;
+ for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx) {
+ // If the operand is not a register default to the size of the previous
+ // operand.
+ // FIXME: Can't we pull the types from the MachineInstr rather than the
+ // operands.
+ if (MI.getOperand(Idx).isReg())
+ Size = getSizeInBits(MI.getOperand(Idx).getReg(), MRI, *TRI);
+ OpdsMapping.push_back(AMDGPU::getValueMapping(BankID, Size));
+ }
}
- Mapping.setOperandsMapping(getOperandsMapping(OpdsMapping));
-
- return Mapping;
+ return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
+ MI.getNumOperands());
}
diff --git a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index f13bde87ef2d..7c198a1b8a3f 100644
--- a/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -44,7 +44,7 @@ class AMDGPURegisterBankInfo : public AMDGPUGenRegisterBankInfo {
/// See RegisterBankInfo::applyMapping.
void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
- RegisterBankInfo::InstructionMapping
+ const RegisterBankInfo::InstructionMapping &
getInstrMappingForLoad(const MachineInstr &MI) const;
public:
@@ -59,7 +59,8 @@ public:
InstructionMappings
getInstrAlternativeMappings(const MachineInstr &MI) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/lib/Target/AMDGPU/SIFrameLowering.cpp b/lib/Target/AMDGPU/SIFrameLowering.cpp
index 86e3b37b09e9..1279f845de0e 100644
--- a/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -353,7 +353,8 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
if (OffsetRegUsed &&
PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
- .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
+ .addReg(PreloadedScratchWaveOffsetReg,
+ MRI.isPhysRegUsed(ScratchWaveOffsetReg) ? 0 : RegState::Kill);
}
if (CopyBuffer && !CopyBufferFirst) {
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 853c8737b464..cc93c27731ff 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1042,6 +1042,7 @@ static void allocateHSAUserSGPRs(CCState &CCInfo,
static void allocateSystemSGPRs(CCState &CCInfo,
MachineFunction &MF,
SIMachineFunctionInfo &Info,
+ CallingConv::ID CallConv,
bool IsShader) {
if (Info.hasWorkGroupIDX()) {
unsigned Reg = Info.addWorkGroupIDX();
@@ -1072,8 +1073,15 @@ static void allocateSystemSGPRs(CCState &CCInfo,
unsigned PrivateSegmentWaveByteOffsetReg;
if (IsShader) {
- PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
- Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ PrivateSegmentWaveByteOffsetReg =
+ Info.getPrivateSegmentWaveByteOffsetSystemSGPR();
+
+ // This is true if the scratch wave byte offset doesn't have a fixed
+ // location.
+ if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
+ PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
+ Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
+ }
} else
PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();
@@ -1310,7 +1318,7 @@ SDValue SITargetLowering::LowerFormalArguments(
// Start adding system SGPRs.
if (IsEntryFunc)
- allocateSystemSGPRs(CCInfo, MF, *Info, IsShader);
+ allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
diff --git a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 9122cd72d323..b5e3ce3dfe3e 100644
--- a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1087,7 +1087,7 @@ MachineInstr *SIInsertWaitcnts::generateSWaitCntInstBefore(
(CntVal[LGKM_CNT] & AMDGPU::getLgkmcntBitMask(IV)))) {
MachineLoop *ContainingLoop = MLI->getLoopFor(MI.getParent());
if (ContainingLoop) {
- MachineBasicBlock *TBB = ContainingLoop->getTopBlock();
+ MachineBasicBlock *TBB = ContainingLoop->getHeader();
BlockWaitcntBrackets *ScoreBracket =
BlockWaitcntBracketsMap[TBB].get();
if (!ScoreBracket) {
@@ -1097,7 +1097,7 @@ MachineInstr *SIInsertWaitcnts::generateSWaitCntInstBefore(
}
ScoreBracket->setRevisitLoop(true);
DEBUG(dbgs() << "set-revisit: block"
- << ContainingLoop->getTopBlock()->getNumber() << '\n';);
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
}
@@ -1758,12 +1758,12 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
// If we are walking into the block from before the loop, then guarantee
// at least 1 re-walk over the loop to propagate the information, even if
// no S_WAITCNT instructions were generated.
- if (ContainingLoop && ContainingLoop->getTopBlock() == &MBB && J < I &&
+ if (ContainingLoop && ContainingLoop->getHeader() == &MBB && J < I &&
(BlockWaitcntProcessedSet.find(&MBB) ==
BlockWaitcntProcessedSet.end())) {
BlockWaitcntBracketsMap[&MBB]->setRevisitLoop(true);
DEBUG(dbgs() << "set-revisit: block"
- << ContainingLoop->getTopBlock()->getNumber() << '\n';);
+ << ContainingLoop->getHeader()->getNumber() << '\n';);
}
// Walk over the instructions.
@@ -1774,7 +1774,7 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
// See if we want to revisit the loop.
if (ContainingLoop && loopBottom(ContainingLoop) == &MBB) {
- MachineBasicBlock *EntryBB = ContainingLoop->getTopBlock();
+ MachineBasicBlock *EntryBB = ContainingLoop->getHeader();
BlockWaitcntBrackets *EntrySB = BlockWaitcntBracketsMap[EntryBB].get();
if (EntrySB && EntrySB->getRevisitLoop()) {
EntrySB->setRevisitLoop(false);
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index b6a982aee6be..adebb8c4a1c5 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -122,9 +122,15 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
bool MaySpill = ST.isVGPRSpillingEnabled(*F);
bool HasStackObjects = FrameInfo.hasStackObjects();
- if (HasStackObjects || MaySpill)
+ if (HasStackObjects || MaySpill) {
PrivateSegmentWaveByteOffset = true;
+ // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
+ if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
+ (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
+ PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
+ }
+
if (ST.isAmdCodeObjectV2(MF)) {
if (HasStackObjects || MaySpill)
PrivateSegmentBuffer = true;
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index a20887564f44..b18ed509ed23 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -245,11 +245,18 @@ ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
switch (RC->getID()) {
default:
return 0;
- case ARM::tGPRRegClassID:
- return TFI->hasFP(MF) ? 4 : 5;
+ case ARM::tGPRRegClassID: {
+ // hasFP ends up calling getMaxCallFrameComputed() which may not be
+ // available when getPressureLimit() is called as part of
+ // ScheduleDAGRRList.
+ bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
+ ? TFI->hasFP(MF) : true;
+ return 5 - HasFP;
+ }
case ARM::GPRRegClassID: {
- unsigned FP = TFI->hasFP(MF) ? 1 : 0;
- return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
+ bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
+ ? TFI->hasFP(MF) : true;
+ return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
}
case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
case ARM::DPRRegClassID:
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 9f7e60a848d9..e64582402fe1 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -202,7 +202,7 @@ void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
if (!VT.isFloatingPoint() &&
VT != MVT::v2i64 && VT != MVT::v1i64)
- for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
+ for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
setOperationAction(Opcode, VT, Legal);
}
@@ -822,6 +822,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
setOperationAction(ISD::SRL, MVT::i64, Custom);
setOperationAction(ISD::SRA, MVT::i64, Custom);
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
setOperationAction(ISD::ADDC, MVT::i32, Custom);
setOperationAction(ISD::ADDE, MVT::i32, Custom);
@@ -1344,6 +1345,10 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
case ARMISD::SMULWB: return "ARMISD::SMULWB";
case ARMISD::SMULWT: return "ARMISD::SMULWT";
+ case ARMISD::SMLALD: return "ARMISD::SMLALD";
+ case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
+ case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
+ case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
case ARMISD::BFI: return "ARMISD::BFI";
case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
@@ -3311,6 +3316,9 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
}
return Result;
}
+ case Intrinsic::arm_neon_vabs:
+ return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
+ Op.getOperand(1));
case Intrinsic::arm_neon_vmulls:
case Intrinsic::arm_neon_vmullu: {
unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
@@ -7722,6 +7730,37 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
}
}
+static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ unsigned Opc = 0;
+ if (IntNo == Intrinsic::arm_smlald)
+ Opc = ARMISD::SMLALD;
+ else if (IntNo == Intrinsic::arm_smlaldx)
+ Opc = ARMISD::SMLALDX;
+ else if (IntNo == Intrinsic::arm_smlsld)
+ Opc = ARMISD::SMLSLD;
+ else if (IntNo == Intrinsic::arm_smlsldx)
+ Opc = ARMISD::SMLSLDX;
+ else
+ return;
+
+ SDLoc dl(N);
+ SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+ N->getOperand(3),
+ DAG.getConstant(0, dl, MVT::i32));
+ SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+ N->getOperand(3),
+ DAG.getConstant(1, dl, MVT::i32));
+
+ SDValue LongMul = DAG.getNode(Opc, dl,
+ DAG.getVTList(MVT::i32, MVT::i32),
+ N->getOperand(1), N->getOperand(2),
+ Lo, Hi);
+ Results.push_back(LongMul.getValue(0));
+ Results.push_back(LongMul.getValue(1));
+}
+
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
@@ -7763,6 +7802,8 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
case ISD::ATOMIC_CMP_SWAP:
ReplaceCMP_SWAP_64Results(N, Results, DAG);
return;
+ case ISD::INTRINSIC_WO_CHAIN:
+ return ReplaceLongIntrinsic(N, Results, DAG);
}
if (Res.getNode())
Results.push_back(Res);
@@ -12602,7 +12643,7 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const SelectionDAG &DAG,
unsigned Depth) const {
unsigned BitWidth = Known.getBitWidth();
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case ARMISD::ADDC:
@@ -12617,7 +12658,8 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
- if (Known.Zero == 0 && Known.One == 0) return;
+ if (Known.isUnknown())
+ return;
KnownBits KnownRHS;
DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
@@ -14015,3 +14057,8 @@ void ARMTargetLowering::insertCopiesSplitCSR(
.addReg(NewVR);
}
}
+
+void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
+ MF.getFrameInfo().computeMaxCallFrameSize(MF);
+ TargetLoweringBase::finalizeLowering(MF);
+}
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 76e4b60e01fb..08c51b66dfe7 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -184,6 +184,10 @@ class InstrItineraryData;
SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
SMLALTT, // 64-bit signed accumulate multiply top, top 16
+ SMLALD, // Signed multiply accumulate long dual
+ SMLALDX, // Signed multiply accumulate long dual exchange
+ SMLSLD, // Signed multiply subtract long dual
+ SMLSLDX, // Signed multiply subtract long dual exchange
// Operands of the standard BUILD_VECTOR node are not legalized, which
// is fine if BUILD_VECTORs are always lowered to shuffles or other
@@ -540,6 +544,8 @@ class InstrItineraryData;
unsigned getNumInterleavedAccesses(VectorType *VecTy,
const DataLayout &DL) const;
+ void finalizeLowering(MachineFunction &MF) const override;
+
protected:
std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI,
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 28eb5fc30864..a94d6048f02d 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -99,6 +99,11 @@ def SDT_LongMac : SDTypeProfile<2, 4, [SDTCisVT<0, i32>,
SDTCisSameAs<0, 4>,
SDTCisSameAs<0, 5>]>;
+def ARMSmlald : SDNode<"ARMISD::SMLALD", SDT_LongMac>;
+def ARMSmlaldx : SDNode<"ARMISD::SMLALDX", SDT_LongMac>;
+def ARMSmlsld : SDNode<"ARMISD::SMLSLD", SDT_LongMac>;
+def ARMSmlsldx : SDNode<"ARMISD::SMLSLDX", SDT_LongMac>;
+
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>;
@@ -870,7 +875,9 @@ def imm1_16_XFORM: SDNodeXForm<imm, [{
MVT::i32);
}]>;
def Imm1_16AsmOperand: ImmAsmOperandMinusOne<1,16> { let Name = "Imm1_16"; }
-def imm1_16 : Operand<i32>, PatLeaf<(imm), [{ return Imm > 0 && Imm <= 16; }],
+def imm1_16 : Operand<i32>, ImmLeaf<i32, [{
+ return Imm > 0 && Imm <= 16;
+ }],
imm1_16_XFORM> {
let PrintMethod = "printImmPlusOneOperand";
let ParserMatchClass = Imm1_16AsmOperand;
@@ -1983,7 +1990,9 @@ def : InstAlias<"sevl$p", (HINT 5, pred:$p)>, Requires<[IsARM, HasV8]>;
def : InstAlias<"esb$p", (HINT 16, pred:$p)>, Requires<[IsARM, HasRAS]>;
def SEL : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), DPFrm, NoItinerary, "sel",
- "\t$Rd, $Rn, $Rm", []>, Requires<[IsARM, HasV6]> {
+ "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_sel GPR:$Rn, GPR:$Rm))]>,
+ Requires<[IsARM, HasV6]> {
bits<4> Rd;
bits<4> Rn;
bits<4> Rm;
@@ -3472,8 +3481,12 @@ def : ARMV6Pat<(add rGPR:$Rn, (sext_inreg (srl rGPR:$Rm, imm8_or_16:$rot),
(SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
def SXTB16 : AI_ext_rrot_np<0b01101000, "sxtb16">;
+def : ARMV6Pat<(int_arm_sxtb16 GPR:$Src),
+ (SXTB16 GPR:$Src, 0)>;
def SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
+def : ARMV6Pat<(int_arm_sxtab16 GPR:$LHS, GPR:$RHS),
+ (SXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
// Zero extenders
@@ -3493,6 +3506,8 @@ def UXTB16 : AI_ext_rrot<0b01101100,
// (UXTB16r_rot GPR:$Src, 3)>;
def : ARMV6Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
(UXTB16 GPR:$Src, 1)>;
+def : ARMV6Pat<(int_arm_uxtb16 GPR:$Src),
+ (UXTB16 GPR:$Src, 0)>;
def UXTAB : AI_exta_rrot<0b01101110, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
@@ -3507,6 +3522,8 @@ def : ARMV6Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot), 0xFFFF)),
// This isn't safe in general, the add is two 16-bit units, not a 32-bit add.
def UXTAB16 : AI_exta_rrot_np<0b01101100, "uxtab16">;
+def : ARMV6Pat<(int_arm_uxtab16 GPR:$LHS, GPR:$RHS),
+ (UXTAB16 GPR:$LHS, GPR:$RHS, 0)>;
def SBFX : I<(outs GPRnopc:$Rd),
@@ -3633,71 +3650,85 @@ class AAI<bits<8> op27_20, bits<8> op11_4, string opc,
let Unpredictable{11-8} = 0b1111;
}
-// Saturating add/subtract
+// Wrappers around the AAI class
+class AAIRevOpr<bits<8> op27_20, bits<8> op11_4, string opc,
+ list<dag> pattern = []>
+ : AAI<op27_20, op11_4, opc,
+ pattern,
+ (ins GPRnopc:$Rm, GPRnopc:$Rn),
+ "\t$Rd, $Rm, $Rn">;
+class AAIIntrinsic<bits<8> op27_20, bits<8> op11_4, string opc,
+ Intrinsic intrinsic>
+ : AAI<op27_20, op11_4, opc,
+ [(set GPRnopc:$Rd, (intrinsic GPRnopc:$Rn, GPRnopc:$Rm))]>;
+
+// Saturating add/subtract
+let hasSideEffects = 1 in {
+def QADD8 : AAIIntrinsic<0b01100010, 0b11111001, "qadd8", int_arm_qadd8>;
+def QADD16 : AAIIntrinsic<0b01100010, 0b11110001, "qadd16", int_arm_qadd16>;
+def QSUB16 : AAIIntrinsic<0b01100010, 0b11110111, "qsub16", int_arm_qsub16>;
+def QSUB8 : AAIIntrinsic<0b01100010, 0b11111111, "qsub8", int_arm_qsub8>;
+
+def QDADD : AAIRevOpr<0b00010100, 0b00000101, "qdadd",
+ [(set GPRnopc:$Rd, (int_arm_qadd (int_arm_qadd GPRnopc:$Rm,
+ GPRnopc:$Rm),
+ GPRnopc:$Rn))]>;
+def QDSUB : AAIRevOpr<0b00010110, 0b00000101, "qdsub",
+ [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm,
+ (int_arm_qadd GPRnopc:$Rn, GPRnopc:$Rn)))]>;
+def QSUB : AAIRevOpr<0b00010010, 0b00000101, "qsub",
+ [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))]>;
let DecoderMethod = "DecodeQADDInstruction" in
-def QADD : AAI<0b00010000, 0b00000101, "qadd",
- [(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))],
- (ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
-
-def QSUB : AAI<0b00010010, 0b00000101, "qsub",
- [(set GPRnopc:$Rd, (int_arm_qsub GPRnopc:$Rm, GPRnopc:$Rn))],
- (ins GPRnopc:$Rm, GPRnopc:$Rn), "\t$Rd, $Rm, $Rn">;
-def QDADD : AAI<0b00010100, 0b00000101, "qdadd", [],
- (ins GPRnopc:$Rm, GPRnopc:$Rn),
- "\t$Rd, $Rm, $Rn">;
-def QDSUB : AAI<0b00010110, 0b00000101, "qdsub", [],
- (ins GPRnopc:$Rm, GPRnopc:$Rn),
- "\t$Rd, $Rm, $Rn">;
-
-def QADD16 : AAI<0b01100010, 0b11110001, "qadd16">;
-def QADD8 : AAI<0b01100010, 0b11111001, "qadd8">;
-def QASX : AAI<0b01100010, 0b11110011, "qasx">;
-def QSAX : AAI<0b01100010, 0b11110101, "qsax">;
-def QSUB16 : AAI<0b01100010, 0b11110111, "qsub16">;
-def QSUB8 : AAI<0b01100010, 0b11111111, "qsub8">;
-def UQADD16 : AAI<0b01100110, 0b11110001, "uqadd16">;
-def UQADD8 : AAI<0b01100110, 0b11111001, "uqadd8">;
-def UQASX : AAI<0b01100110, 0b11110011, "uqasx">;
-def UQSAX : AAI<0b01100110, 0b11110101, "uqsax">;
-def UQSUB16 : AAI<0b01100110, 0b11110111, "uqsub16">;
-def UQSUB8 : AAI<0b01100110, 0b11111111, "uqsub8">;
+ def QADD : AAIRevOpr<0b00010000, 0b00000101, "qadd",
+ [(set GPRnopc:$Rd, (int_arm_qadd GPRnopc:$Rm, GPRnopc:$Rn))]>;
+}
+
+def UQADD16 : AAIIntrinsic<0b01100110, 0b11110001, "uqadd16", int_arm_uqadd16>;
+def UQADD8 : AAIIntrinsic<0b01100110, 0b11111001, "uqadd8", int_arm_uqadd8>;
+def UQSUB16 : AAIIntrinsic<0b01100110, 0b11110111, "uqsub16", int_arm_uqsub16>;
+def UQSUB8 : AAIIntrinsic<0b01100110, 0b11111111, "uqsub8", int_arm_uqsub8>;
+def QASX : AAIIntrinsic<0b01100010, 0b11110011, "qasx", int_arm_qasx>;
+def QSAX : AAIIntrinsic<0b01100010, 0b11110101, "qsax", int_arm_qsax>;
+def UQASX : AAIIntrinsic<0b01100110, 0b11110011, "uqasx", int_arm_uqasx>;
+def UQSAX : AAIIntrinsic<0b01100110, 0b11110101, "uqsax", int_arm_uqsax>;
// Signed/Unsigned add/subtract
-def SASX : AAI<0b01100001, 0b11110011, "sasx">;
-def SADD16 : AAI<0b01100001, 0b11110001, "sadd16">;
-def SADD8 : AAI<0b01100001, 0b11111001, "sadd8">;
-def SSAX : AAI<0b01100001, 0b11110101, "ssax">;
-def SSUB16 : AAI<0b01100001, 0b11110111, "ssub16">;
-def SSUB8 : AAI<0b01100001, 0b11111111, "ssub8">;
-def UASX : AAI<0b01100101, 0b11110011, "uasx">;
-def UADD16 : AAI<0b01100101, 0b11110001, "uadd16">;
-def UADD8 : AAI<0b01100101, 0b11111001, "uadd8">;
-def USAX : AAI<0b01100101, 0b11110101, "usax">;
-def USUB16 : AAI<0b01100101, 0b11110111, "usub16">;
-def USUB8 : AAI<0b01100101, 0b11111111, "usub8">;
+def SASX : AAIIntrinsic<0b01100001, 0b11110011, "sasx", int_arm_sasx>;
+def SADD16 : AAIIntrinsic<0b01100001, 0b11110001, "sadd16", int_arm_sadd16>;
+def SADD8 : AAIIntrinsic<0b01100001, 0b11111001, "sadd8", int_arm_sadd8>;
+def SSAX : AAIIntrinsic<0b01100001, 0b11110101, "ssax", int_arm_ssax>;
+def SSUB16 : AAIIntrinsic<0b01100001, 0b11110111, "ssub16", int_arm_ssub16>;
+def SSUB8 : AAIIntrinsic<0b01100001, 0b11111111, "ssub8", int_arm_ssub8>;
+def UASX : AAIIntrinsic<0b01100101, 0b11110011, "uasx", int_arm_uasx>;
+def UADD16 : AAIIntrinsic<0b01100101, 0b11110001, "uadd16", int_arm_uadd16>;
+def UADD8 : AAIIntrinsic<0b01100101, 0b11111001, "uadd8", int_arm_uadd8>;
+def USAX : AAIIntrinsic<0b01100101, 0b11110101, "usax", int_arm_usax>;
+def USUB16 : AAIIntrinsic<0b01100101, 0b11110111, "usub16", int_arm_usub16>;
+def USUB8 : AAIIntrinsic<0b01100101, 0b11111111, "usub8", int_arm_usub8>;
// Signed/Unsigned halving add/subtract
-def SHASX : AAI<0b01100011, 0b11110011, "shasx">;
-def SHADD16 : AAI<0b01100011, 0b11110001, "shadd16">;
-def SHADD8 : AAI<0b01100011, 0b11111001, "shadd8">;
-def SHSAX : AAI<0b01100011, 0b11110101, "shsax">;
-def SHSUB16 : AAI<0b01100011, 0b11110111, "shsub16">;
-def SHSUB8 : AAI<0b01100011, 0b11111111, "shsub8">;
-def UHASX : AAI<0b01100111, 0b11110011, "uhasx">;
-def UHADD16 : AAI<0b01100111, 0b11110001, "uhadd16">;
-def UHADD8 : AAI<0b01100111, 0b11111001, "uhadd8">;
-def UHSAX : AAI<0b01100111, 0b11110101, "uhsax">;
-def UHSUB16 : AAI<0b01100111, 0b11110111, "uhsub16">;
-def UHSUB8 : AAI<0b01100111, 0b11111111, "uhsub8">;
+def SHASX : AAIIntrinsic<0b01100011, 0b11110011, "shasx", int_arm_shasx>;
+def SHADD16 : AAIIntrinsic<0b01100011, 0b11110001, "shadd16", int_arm_shadd16>;
+def SHADD8 : AAIIntrinsic<0b01100011, 0b11111001, "shadd8", int_arm_shadd8>;
+def SHSAX : AAIIntrinsic<0b01100011, 0b11110101, "shsax", int_arm_shsax>;
+def SHSUB16 : AAIIntrinsic<0b01100011, 0b11110111, "shsub16", int_arm_shsub16>;
+def SHSUB8 : AAIIntrinsic<0b01100011, 0b11111111, "shsub8", int_arm_shsub8>;
+def UHASX : AAIIntrinsic<0b01100111, 0b11110011, "uhasx", int_arm_uhasx>;
+def UHADD16 : AAIIntrinsic<0b01100111, 0b11110001, "uhadd16", int_arm_uhadd16>;
+def UHADD8 : AAIIntrinsic<0b01100111, 0b11111001, "uhadd8", int_arm_uhadd8>;
+def UHSAX : AAIIntrinsic<0b01100111, 0b11110101, "uhsax", int_arm_uhsax>;
+def UHSUB16 : AAIIntrinsic<0b01100111, 0b11110111, "uhsub16", int_arm_uhsub16>;
+def UHSUB8 : AAIIntrinsic<0b01100111, 0b11111111, "uhsub8", int_arm_uhsub8>;
// Unsigned Sum of Absolute Differences [and Accumulate].
def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
MulFrm /* for convenience */, NoItinerary, "usad8",
- "\t$Rd, $Rn, $Rm", []>,
+ "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_usad8 GPR:$Rn, GPR:$Rm))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]> {
bits<4> Rd;
bits<4> Rn;
@@ -3711,7 +3742,8 @@ def USAD8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
}
def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
MulFrm /* for convenience */, NoItinerary, "usada8",
- "\t$Rd, $Rn, $Rm, $Ra", []>,
+ "\t$Rd, $Rn, $Rm, $Ra",
+ [(set GPR:$Rd, (int_arm_usada8 GPR:$Rn, GPR:$Rm, GPR:$Ra))]>,
Requires<[IsARM, HasV6]>, Sched<[WriteALU, ReadALU, ReadALU]>{
bits<4> Rd;
bits<4> Rn;
@@ -3726,7 +3758,6 @@ def USADA8 : AI<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra),
}
// Signed/Unsigned saturate
-
def SSAT : AI<(outs GPRnopc:$Rd),
(ins imm1_32:$sat_imm, GPRnopc:$Rn, shift_imm:$sh),
SatFrm, NoItinerary, "ssat", "\t$Rd, $sat_imm, $Rn$sh", []>,
@@ -3795,6 +3826,10 @@ def : ARMV6Pat<(int_arm_usat GPRnopc:$a, imm0_31:$pos),
(USAT imm0_31:$pos, GPRnopc:$a, 0)>;
def : ARMPat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
(SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
+def : ARMV6Pat<(int_arm_ssat16 GPRnopc:$a, imm1_16:$pos),
+ (SSAT16 imm1_16:$pos, GPRnopc:$a)>;
+def : ARMV6Pat<(int_arm_usat16 GPRnopc:$a, imm0_15:$pos),
+ (USAT16 imm0_15:$pos, GPRnopc:$a)>;
//===----------------------------------------------------------------------===//
// Bitwise Instructions.
@@ -4220,8 +4255,8 @@ multiclass AI_smla<string opc> {
IIC_iMAC16, !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
[(set GPRnopc:$Rd,
(add GPR:$Ra, (ARMsmulwt GPRnopc:$Rn, GPRnopc:$Rm)))]>,
- Requires<[IsARM, HasV5TE, UseMulOps]>,
- Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
+ Requires<[IsARM, HasV5TE, UseMulOps]>,
+ Sched<[WriteMAC16, ReadMUL, ReadMUL, ReadMAC]>;
}
}
@@ -4255,7 +4290,8 @@ def : ARMV5TEPat<(ARMsmlaltt GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
// Helper class for AI_smld.
class AMulDualIbase<bit long, bit sub, bit swap, dag oops, dag iops,
InstrItinClass itin, string opc, string asm>
- : AI<oops, iops, MulFrm, itin, opc, asm, []>, Requires<[IsARM, HasV6]> {
+ : AI<oops, iops, MulFrm, itin, opc, asm, []>,
+ Requires<[IsARM, HasV6]> {
bits<4> Rn;
bits<4> Rm;
let Inst{27-23} = 0b01110;
@@ -4305,20 +4341,40 @@ multiclass AI_smld<bit sub, string opc> {
Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]>;
def LD: AMulDualI64<1, sub, 0, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
- (ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ NoItinerary,
!strconcat(opc, "ld"), "\t$RdLo, $RdHi, $Rn, $Rm">,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">,
Sched<[WriteMAC64Lo, WriteMAC64Hi, ReadMUL, ReadMUL, ReadMAC, ReadMAC]>;
def LDX : AMulDualI64<1, sub, 1, (outs GPRnopc:$RdLo, GPRnopc:$RdHi),
- (ins GPRnopc:$Rn, GPRnopc:$Rm), NoItinerary,
+ (ins GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ NoItinerary,
!strconcat(opc, "ldx"),"\t$RdLo, $RdHi, $Rn, $Rm">,
+ RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">,
Sched<[WriteMUL64Lo, WriteMUL64Hi, ReadMUL, ReadMUL]>;
-
}
defm SMLA : AI_smld<0, "smla">;
defm SMLS : AI_smld<1, "smls">;
+def : ARMV6Pat<(int_arm_smlad GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLAD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smladx GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLADX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smlsd GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLSD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(int_arm_smlsdx GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra),
+ (SMLSDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$Ra)>;
+def : ARMV6Pat<(ARMSmlald GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLALD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlaldx GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLALDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlsld GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLSLD GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+def : ARMV6Pat<(ARMSmlsldx GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi),
+ (SMLSLDX GPRnopc:$Rn, GPRnopc:$Rm, GPRnopc:$RLo, GPRnopc:$RHi)>;
+
multiclass AI_sdml<bit sub, string opc> {
def D:AMulDualI<0, sub, 0, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm),
@@ -4332,6 +4388,15 @@ multiclass AI_sdml<bit sub, string opc> {
defm SMUA : AI_sdml<0, "smua">;
defm SMUS : AI_sdml<1, "smus">;
+def : ARMV6Pat<(int_arm_smuad GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUAD GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smuadx GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUADX GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smusd GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUSD GPRnopc:$Rn, GPRnopc:$Rm)>;
+def : ARMV6Pat<(int_arm_smusdx GPRnopc:$Rn, GPRnopc:$Rm),
+ (SMUSDX GPRnopc:$Rn, GPRnopc:$Rm)>;
+
//===----------------------------------------------------------------------===//
// Division Instructions (ARMv7-A with virtualization extension)
//
@@ -5648,6 +5713,32 @@ def : ARMV5MOPat<(add GPR:$acc,
(SMLATB GPR:$a, GPR:$b, GPR:$acc)>,
Sched<[WriteMUL32, ReadMUL, ReadMUL]>;
+def : ARMV5TEPat<(int_arm_smulbb GPR:$a, GPR:$b),
+ (SMULBB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulbt GPR:$a, GPR:$b),
+ (SMULBT GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smultb GPR:$a, GPR:$b),
+ (SMULTB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smultt GPR:$a, GPR:$b),
+ (SMULTT GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulwb GPR:$a, GPR:$b),
+ (SMULWB GPR:$a, GPR:$b)>;
+def : ARMV5TEPat<(int_arm_smulwt GPR:$a, GPR:$b),
+ (SMULWT GPR:$a, GPR:$b)>;
+
+def : ARMV5TEPat<(int_arm_smlabb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlabt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlatb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlatt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLATT GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlawb GPR:$a, GPR:$b, GPR:$acc),
+ (SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
+def : ARMV5TEPat<(int_arm_smlawt GPR:$a, GPR:$b, GPR:$acc),
+ (SMLAWT GPR:$a, GPR:$b, GPR:$acc)>;
+
// Pre-v7 uses MCR for synchronization barriers.
def : ARMPat<(ARMMemBarrierMCR GPR:$zero), (MCR 15, 0, GPR:$zero, 7, 10, 5)>,
Requires<[IsARM, HasV6]>;
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 9b08c612e16b..51290e5a5b93 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -5558,8 +5558,7 @@ defm VSRI : N2VShInsR_QHSD<1, 1, 0b0100, 1, "vsri">;
// VABS : Vector Absolute Value
defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
- IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
- int_arm_neon_vabs>;
+ IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s", abs>;
def VABSfd : N2VD<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
"vabs", "f32",
v2f32, v2f32, fabs>;
@@ -5575,29 +5574,6 @@ def VABShq : N2VQ<0b11, 0b11, 0b01, 0b01, 0b01110, 0,
v8f16, v8f16, fabs>,
Requires<[HasNEON, HasFullFP16]>;
-def : Pat<(xor (v2i32 (bitconvert (v8i8 (NEONvshrs DPR:$src, (i32 7))))),
- (v2i32 (bitconvert (v8i8 (add DPR:$src,
- (NEONvshrs DPR:$src, (i32 7))))))),
- (VABSv8i8 DPR:$src)>;
-def : Pat<(xor (v2i32 (bitconvert (v4i16 (NEONvshrs DPR:$src, (i32 15))))),
- (v2i32 (bitconvert (v4i16 (add DPR:$src,
- (NEONvshrs DPR:$src, (i32 15))))))),
- (VABSv4i16 DPR:$src)>;
-def : Pat<(xor (v2i32 (NEONvshrs DPR:$src, (i32 31))),
- (v2i32 (add DPR:$src, (NEONvshrs DPR:$src, (i32 31))))),
- (VABSv2i32 DPR:$src)>;
-def : Pat<(xor (v4i32 (bitconvert (v16i8 (NEONvshrs QPR:$src, (i32 7))))),
- (v4i32 (bitconvert (v16i8 (add QPR:$src,
- (NEONvshrs QPR:$src, (i32 7))))))),
- (VABSv16i8 QPR:$src)>;
-def : Pat<(xor (v4i32 (bitconvert (v8i16 (NEONvshrs QPR:$src, (i32 15))))),
- (v4i32 (bitconvert (v8i16 (add QPR:$src,
- (NEONvshrs QPR:$src, (i32 15))))))),
- (VABSv8i16 QPR:$src)>;
-def : Pat<(xor (v4i32 (NEONvshrs QPR:$src, (i32 31))),
- (v4i32 (add QPR:$src, (NEONvshrs QPR:$src, (i32 31))))),
- (VABSv4i32 QPR:$src)>;
-
// VQABS : Vector Saturating Absolute Value
defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index f710ee6a7e77..bf3d820e7b7d 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -1993,6 +1993,10 @@ def : Thumb2DSPPat<(add rGPR:$Rn,
def : Thumb2DSPPat<(add rGPR:$Rn,
(sext_inreg (rotr rGPR:$Rm, rot_imm:$rot), i16)),
(t2SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_sxtb16 rGPR:$Rn),
+ (t2SXTB16 rGPR:$Rn, 0)>;
+def : Thumb2DSPPat<(int_arm_sxtab16 rGPR:$Rn, rGPR:$Rm),
+ (t2SXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
// A simple right-shift can also be used in most cases (the exception is the
@@ -2026,6 +2030,9 @@ def : Thumb2DSPPat<(and (rotr rGPR:$Rm, rot_imm:$rot), 0x0000FFFF),
def : Thumb2DSPPat<(and (rotr rGPR:$Rm, rot_imm:$rot), 0x00FF00FF),
(t2UXTB16 rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_uxtb16 rGPR:$Rm),
+ (t2UXTB16 rGPR:$Rm, 0)>;
+
// FIXME: This pattern incorrectly assumes the shl operator is a rotate.
// The transformation should probably be done as a combiner action
// instead so we can include a check for masking back in the upper
@@ -2053,6 +2060,8 @@ def : Thumb2DSPPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, rot_imm:$rot),
def : Thumb2DSPPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot),
0xFFFF)),
(t2UXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2DSPPat<(int_arm_uxtab16 rGPR:$Rn, rGPR:$Rm),
+ (t2UXTAB16 rGPR:$Rn, rGPR:$Rm, 0)>;
}
@@ -2137,10 +2146,9 @@ def : T2Pat<(ARMadde rGPR:$src, t2_so_imm_not:$imm, CPSR),
def : T2Pat<(ARMadde rGPR:$src, imm0_65535_neg:$imm, CPSR),
(t2SBCrr rGPR:$src, (t2MOVi16 (imm_not_XFORM imm:$imm)))>;
-// Select Bytes -- for disassembly only
-
def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
- NoItinerary, "sel", "\t$Rd, $Rn, $Rm", []>,
+ NoItinerary, "sel", "\t$Rd, $Rn, $Rm",
+ [(set GPR:$Rd, (int_arm_sel GPR:$Rn, GPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]> {
let Inst{31-27} = 0b11111;
let Inst{26-24} = 0b010;
@@ -2154,9 +2162,7 @@ def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
// A6.3.13, A6.3.14, A6.3.15 Parallel addition and subtraction (signed/unsigned)
// And Miscellaneous operations -- for disassembly only
class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc,
- list<dag> pat = [/* For disassembly only; pattern left blank */],
- dag iops = (ins rGPR:$Rn, rGPR:$Rm),
- string asm = "\t$Rd, $Rn, $Rm">
+ list<dag> pat, dag iops, string asm>
: T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, pat>,
Requires<[IsThumb2, HasDSP]> {
let Inst{31-27} = 0b11111;
@@ -2174,60 +2180,72 @@ class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc,
let Inst{3-0} = Rm;
}
-// Saturating add/subtract -- for disassembly only
-
-def t2QADD : T2I_pam<0b000, 0b1000, "qadd",
- [(set rGPR:$Rd, (int_arm_qadd rGPR:$Rn, rGPR:$Rm))],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QADD16 : T2I_pam<0b001, 0b0001, "qadd16">;
-def t2QADD8 : T2I_pam<0b000, 0b0001, "qadd8">;
-def t2QASX : T2I_pam<0b010, 0b0001, "qasx">;
-def t2QDADD : T2I_pam<0b000, 0b1001, "qdadd", [],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QDSUB : T2I_pam<0b000, 0b1011, "qdsub", [],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QSAX : T2I_pam<0b110, 0b0001, "qsax">;
-def t2QSUB : T2I_pam<0b000, 0b1010, "qsub",
- [(set rGPR:$Rd, (int_arm_qsub rGPR:$Rn, rGPR:$Rm))],
- (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
-def t2QSUB16 : T2I_pam<0b101, 0b0001, "qsub16">;
-def t2QSUB8 : T2I_pam<0b100, 0b0001, "qsub8">;
-def t2UQADD16 : T2I_pam<0b001, 0b0101, "uqadd16">;
-def t2UQADD8 : T2I_pam<0b000, 0b0101, "uqadd8">;
-def t2UQASX : T2I_pam<0b010, 0b0101, "uqasx">;
-def t2UQSAX : T2I_pam<0b110, 0b0101, "uqsax">;
-def t2UQSUB16 : T2I_pam<0b101, 0b0101, "uqsub16">;
-def t2UQSUB8 : T2I_pam<0b100, 0b0101, "uqsub8">;
-
-// Signed/Unsigned add/subtract -- for disassembly only
-
-def t2SASX : T2I_pam<0b010, 0b0000, "sasx">;
-def t2SADD16 : T2I_pam<0b001, 0b0000, "sadd16">;
-def t2SADD8 : T2I_pam<0b000, 0b0000, "sadd8">;
-def t2SSAX : T2I_pam<0b110, 0b0000, "ssax">;
-def t2SSUB16 : T2I_pam<0b101, 0b0000, "ssub16">;
-def t2SSUB8 : T2I_pam<0b100, 0b0000, "ssub8">;
-def t2UASX : T2I_pam<0b010, 0b0100, "uasx">;
-def t2UADD16 : T2I_pam<0b001, 0b0100, "uadd16">;
-def t2UADD8 : T2I_pam<0b000, 0b0100, "uadd8">;
-def t2USAX : T2I_pam<0b110, 0b0100, "usax">;
-def t2USUB16 : T2I_pam<0b101, 0b0100, "usub16">;
-def t2USUB8 : T2I_pam<0b100, 0b0100, "usub8">;
-
-// Signed/Unsigned halving add/subtract -- for disassembly only
-
-def t2SHASX : T2I_pam<0b010, 0b0010, "shasx">;
-def t2SHADD16 : T2I_pam<0b001, 0b0010, "shadd16">;
-def t2SHADD8 : T2I_pam<0b000, 0b0010, "shadd8">;
-def t2SHSAX : T2I_pam<0b110, 0b0010, "shsax">;
-def t2SHSUB16 : T2I_pam<0b101, 0b0010, "shsub16">;
-def t2SHSUB8 : T2I_pam<0b100, 0b0010, "shsub8">;
-def t2UHASX : T2I_pam<0b010, 0b0110, "uhasx">;
-def t2UHADD16 : T2I_pam<0b001, 0b0110, "uhadd16">;
-def t2UHADD8 : T2I_pam<0b000, 0b0110, "uhadd8">;
-def t2UHSAX : T2I_pam<0b110, 0b0110, "uhsax">;
-def t2UHSUB16 : T2I_pam<0b101, 0b0110, "uhsub16">;
-def t2UHSUB8 : T2I_pam<0b100, 0b0110, "uhsub8">;
+class T2I_pam_intrinsics<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
+ : T2I_pam<op22_20, op7_4, opc,
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm))],
+ (ins rGPR:$Rn, rGPR:$Rm), "\t$Rd, $Rn, $Rm">;
+
+class T2I_pam_intrinsics_rev<bits<3> op22_20, bits<4> op7_4, string opc>
+ : T2I_pam<op22_20, op7_4, opc, [],
+ (ins rGPR:$Rm, rGPR:$Rn), "\t$Rd, $Rm, $Rn">;
+
+// Saturating add/subtract
+def t2QADD16 : T2I_pam_intrinsics<0b001, 0b0001, "qadd16", int_arm_qadd16>;
+def t2QADD8 : T2I_pam_intrinsics<0b000, 0b0001, "qadd8", int_arm_qadd8>;
+def t2QASX : T2I_pam_intrinsics<0b010, 0b0001, "qasx", int_arm_qasx>;
+def t2UQSUB8 : T2I_pam_intrinsics<0b100, 0b0101, "uqsub8", int_arm_uqsub8>;
+def t2QSAX : T2I_pam_intrinsics<0b110, 0b0001, "qsax", int_arm_qsax>;
+def t2QSUB16 : T2I_pam_intrinsics<0b101, 0b0001, "qsub16", int_arm_qsub16>;
+def t2QSUB8 : T2I_pam_intrinsics<0b100, 0b0001, "qsub8", int_arm_qsub8>;
+def t2UQADD16 : T2I_pam_intrinsics<0b001, 0b0101, "uqadd16", int_arm_uqadd16>;
+def t2UQADD8 : T2I_pam_intrinsics<0b000, 0b0101, "uqadd8", int_arm_uqadd8>;
+def t2UQASX : T2I_pam_intrinsics<0b010, 0b0101, "uqasx", int_arm_uqasx>;
+def t2UQSAX : T2I_pam_intrinsics<0b110, 0b0101, "uqsax", int_arm_uqsax>;
+def t2UQSUB16 : T2I_pam_intrinsics<0b101, 0b0101, "uqsub16", int_arm_uqsub16>;
+def t2QADD : T2I_pam_intrinsics_rev<0b000, 0b1000, "qadd">;
+def t2QSUB : T2I_pam_intrinsics_rev<0b000, 0b1010, "qsub">;
+def t2QDADD : T2I_pam_intrinsics_rev<0b000, 0b1001, "qdadd">;
+def t2QDSUB : T2I_pam_intrinsics_rev<0b000, 0b1011, "qdsub">;
+
+def : Thumb2DSPPat<(int_arm_qadd rGPR:$Rm, rGPR:$Rn),
+ (t2QADD rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qsub rGPR:$Rm, rGPR:$Rn),
+ (t2QSUB rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qadd(int_arm_qadd rGPR:$Rm, rGPR:$Rm), rGPR:$Rn),
+ (t2QDADD rGPR:$Rm, rGPR:$Rn)>;
+def : Thumb2DSPPat<(int_arm_qsub rGPR:$Rm, (int_arm_qadd rGPR:$Rn, rGPR:$Rn)),
+ (t2QDSUB rGPR:$Rm, rGPR:$Rn)>;
+
+// Signed/Unsigned add/subtract
+
+def t2SASX : T2I_pam_intrinsics<0b010, 0b0000, "sasx", int_arm_sasx>;
+def t2SADD16 : T2I_pam_intrinsics<0b001, 0b0000, "sadd16", int_arm_sadd16>;
+def t2SADD8 : T2I_pam_intrinsics<0b000, 0b0000, "sadd8", int_arm_sadd8>;
+def t2SSAX : T2I_pam_intrinsics<0b110, 0b0000, "ssax", int_arm_ssax>;
+def t2SSUB16 : T2I_pam_intrinsics<0b101, 0b0000, "ssub16", int_arm_ssub16>;
+def t2SSUB8 : T2I_pam_intrinsics<0b100, 0b0000, "ssub8", int_arm_ssub8>;
+def t2UASX : T2I_pam_intrinsics<0b010, 0b0100, "uasx", int_arm_uasx>;
+def t2UADD16 : T2I_pam_intrinsics<0b001, 0b0100, "uadd16", int_arm_uadd16>;
+def t2UADD8 : T2I_pam_intrinsics<0b000, 0b0100, "uadd8", int_arm_uadd8>;
+def t2USAX : T2I_pam_intrinsics<0b110, 0b0100, "usax", int_arm_usax>;
+def t2USUB16 : T2I_pam_intrinsics<0b101, 0b0100, "usub16", int_arm_usub16>;
+def t2USUB8 : T2I_pam_intrinsics<0b100, 0b0100, "usub8", int_arm_usub8>;
+
+// Signed/Unsigned halving add/subtract
+
+def t2SHASX : T2I_pam_intrinsics<0b010, 0b0010, "shasx", int_arm_shasx>;
+def t2SHADD16 : T2I_pam_intrinsics<0b001, 0b0010, "shadd16", int_arm_shadd16>;
+def t2SHADD8 : T2I_pam_intrinsics<0b000, 0b0010, "shadd8", int_arm_shadd8>;
+def t2SHSAX : T2I_pam_intrinsics<0b110, 0b0010, "shsax", int_arm_shsax>;
+def t2SHSUB16 : T2I_pam_intrinsics<0b101, 0b0010, "shsub16", int_arm_shsub16>;
+def t2SHSUB8 : T2I_pam_intrinsics<0b100, 0b0010, "shsub8", int_arm_shsub8>;
+def t2UHASX : T2I_pam_intrinsics<0b010, 0b0110, "uhasx", int_arm_uhasx>;
+def t2UHADD16 : T2I_pam_intrinsics<0b001, 0b0110, "uhadd16", int_arm_uhadd16>;
+def t2UHADD8 : T2I_pam_intrinsics<0b000, 0b0110, "uhadd8", int_arm_uhadd8>;
+def t2UHSAX : T2I_pam_intrinsics<0b110, 0b0110, "uhsax", int_arm_uhsax>;
+def t2UHSUB16 : T2I_pam_intrinsics<0b101, 0b0110, "uhsub16", int_arm_uhsub16>;
+def t2UHSUB8 : T2I_pam_intrinsics<0b100, 0b0110, "uhsub8", int_arm_uhsub8>;
// Helper class for disassembly only
// A6.3.16 & A6.3.17
@@ -2255,16 +2273,19 @@ class T2FourReg_mac<bit long, bits<3> op22_20, bits<4> op7_4, dag oops,
// Unsigned Sum of Absolute Differences [and Accumulate].
def t2USAD8 : T2ThreeReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm),
- NoItinerary, "usad8", "\t$Rd, $Rn, $Rm", []>,
+ NoItinerary, "usad8", "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (int_arm_usad8 rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]> {
let Inst{15-12} = 0b1111;
}
def t2USADA8 : T2FourReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), NoItinerary,
- "usada8", "\t$Rd, $Rn, $Rm, $Ra", []>,
+ "usada8", "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (int_arm_usada8 rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>,
Requires<[IsThumb2, HasDSP]>;
// Signed/Unsigned saturate.
+let hasSideEffects = 1 in
class T2SatI<dag iops, string opc, string asm>
: T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, []> {
bits<4> Rd;
@@ -2313,10 +2334,16 @@ def t2USAT16: T2SatI<(ins imm0_15:$sat_imm, rGPR:$Rn),
let Inst{4} = 0;
}
-def : T2Pat<(int_arm_ssat GPR:$a, imm1_32:$pos), (t2SSAT imm1_32:$pos, GPR:$a, 0)>;
-def : T2Pat<(int_arm_usat GPR:$a, imm0_31:$pos), (t2USAT imm0_31:$pos, GPR:$a, 0)>;
def : T2Pat<(ARMssatnoshift GPRnopc:$Rn, imm0_31:$imm),
(t2SSAT imm0_31:$imm, GPRnopc:$Rn, 0)>;
+def : T2Pat<(int_arm_ssat GPR:$a, imm1_32:$pos),
+ (t2SSAT imm1_32:$pos, GPR:$a, 0)>;
+def : T2Pat<(int_arm_usat GPR:$a, imm0_31:$pos),
+ (t2USAT imm0_31:$pos, GPR:$a, 0)>;
+def : T2Pat<(int_arm_ssat16 GPR:$a, imm1_16:$pos),
+ (t2SSAT16 imm1_16:$pos, GPR:$a)>;
+def : T2Pat<(int_arm_usat16 GPR:$a, imm0_15:$pos),
+ (t2USAT16 imm0_15:$pos, GPR:$a)>;
//===----------------------------------------------------------------------===//
// Shift and rotate Instructions.
@@ -2689,6 +2716,18 @@ def : Thumb2DSPPat<(mul sext_16_node:$Rn, (sra rGPR:$Rm, (i32 16))),
(t2SMULBT rGPR:$Rn, rGPR:$Rm)>;
def : Thumb2DSPPat<(mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm),
(t2SMULTB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulbb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULBB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulbt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULBT rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smultb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULTB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smultt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULTT rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulwb rGPR:$Rn, rGPR:$Rm),
+ (t2SMULWB rGPR:$Rn, rGPR:$Rm)>;
+def : Thumb2DSPPat<(int_arm_smulwt rGPR:$Rn, rGPR:$Rm),
+ (t2SMULWT rGPR:$Rn, rGPR:$Rm)>;
class T2FourRegSMLA<bits<3> op22_20, bits<2> op5_4, string opc,
list<dag> pattern>
@@ -2730,6 +2769,19 @@ def : Thumb2DSPMulPat<(add rGPR:$Ra,
(mul (sra rGPR:$Rn, (i32 16)), sext_16_node:$Rm)),
(t2SMLATB rGPR:$Rn, rGPR:$Rm, rGPR:$Ra)>;
+def : Thumb2DSPPat<(int_arm_smlabb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlabt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlatb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlatt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLATT GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlawb GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
+def : Thumb2DSPPat<(int_arm_smlawt GPR:$a, GPR:$b, GPR:$acc),
+ (t2SMLAWT GPR:$a, GPR:$b, GPR:$acc)>;
+
// Halfword multiple accumulate long: SMLAL<x><y>
def t2SMLALBB : T2MlaLong<0b100, 0b1000, "smlalbb">,
Requires<[IsThumb2, HasDSP]>;
@@ -2749,39 +2801,44 @@ def : Thumb2DSPPat<(ARMsmlaltb GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
def : Thumb2DSPPat<(ARMsmlaltt GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi),
(t2SMLALTT $Rn, $Rm, $RLo, $RHi)>;
-class T2DualHalfMul<bits<3> op22_20, bits<4> op7_4, string opc>
+class T2DualHalfMul<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
: T2ThreeReg_mac<0, op22_20, op7_4,
(outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm),
- IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm", []>,
+ IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm",
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm))]>,
Requires<[IsThumb2, HasDSP]>,
Sched<[WriteMAC32, ReadMUL, ReadMUL, ReadMAC]> {
let Inst{15-12} = 0b1111;
}
// Dual halfword multiple: SMUAD, SMUSD, SMLAD, SMLSD, SMLALD, SMLSLD
-def t2SMUAD: T2DualHalfMul<0b010, 0b0000, "smuad">;
-def t2SMUADX: T2DualHalfMul<0b010, 0b0001, "smuadx">;
-def t2SMUSD: T2DualHalfMul<0b100, 0b0000, "smusd">;
-def t2SMUSDX: T2DualHalfMul<0b100, 0b0001, "smusdx">;
+def t2SMUAD: T2DualHalfMul<0b010, 0b0000, "smuad", int_arm_smuad>;
+def t2SMUADX: T2DualHalfMul<0b010, 0b0001, "smuadx", int_arm_smuadx>;
+def t2SMUSD: T2DualHalfMul<0b100, 0b0000, "smusd", int_arm_smusd>;
+def t2SMUSDX: T2DualHalfMul<0b100, 0b0001, "smusdx", int_arm_smusdx>;
-class T2DualHalfMulAdd<bits<3> op22_20, bits<4> op7_4, string opc>
+class T2DualHalfMulAdd<bits<3> op22_20, bits<4> op7_4, string opc,
+ Intrinsic intrinsic>
: T2FourReg_mac<0, op22_20, op7_4,
(outs rGPR:$Rd),
(ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra),
- IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm, $Ra", []>,
+ IIC_iMAC32, opc, "\t$Rd, $Rn, $Rm, $Ra",
+ [(set rGPR:$Rd, (intrinsic rGPR:$Rn, rGPR:$Rm, rGPR:$Ra))]>,
Requires<[IsThumb2, HasDSP]>;
-def t2SMLAD : T2DualHalfMulAdd<0b010, 0b0000, "smlad">;
-def t2SMLADX : T2DualHalfMulAdd<0b010, 0b0001, "smladx">;
-def t2SMLSD : T2DualHalfMulAdd<0b100, 0b0000, "smlsd">;
-def t2SMLSDX : T2DualHalfMulAdd<0b100, 0b0001, "smlsdx">;
+def t2SMLAD : T2DualHalfMulAdd<0b010, 0b0000, "smlad", int_arm_smlad>;
+def t2SMLADX : T2DualHalfMulAdd<0b010, 0b0001, "smladx", int_arm_smladx>;
+def t2SMLSD : T2DualHalfMulAdd<0b100, 0b0000, "smlsd", int_arm_smlsd>;
+def t2SMLSDX : T2DualHalfMulAdd<0b100, 0b0001, "smlsdx", int_arm_smlsdx>;
class T2DualHalfMulAddLong<bits<3> op22_20, bits<4> op7_4, string opc>
: T2FourReg_mac<1, op22_20, op7_4,
(outs rGPR:$Ra, rGPR:$Rd),
- (ins rGPR:$Rn, rGPR:$Rm),
+ (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
IIC_iMAC64, opc, "\t$Ra, $Rd, $Rn, $Rm", []>,
+ RegConstraint<"$Ra = $RLo, $Rd = $RHi">,
Requires<[IsThumb2, HasDSP]>,
Sched<[WriteMAC64Lo, WriteMAC64Hi, ReadMUL, ReadMUL, ReadMAC, ReadMAC]>;
@@ -2790,6 +2847,15 @@ def t2SMLALDX : T2DualHalfMulAddLong<0b100, 0b1101, "smlaldx">;
def t2SMLSLD : T2DualHalfMulAddLong<0b101, 0b1100, "smlsld">;
def t2SMLSLDX : T2DualHalfMulAddLong<0b101, 0b1101, "smlsldx">;
+def : Thumb2DSPPat<(ARMSmlald rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLALD rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlaldx rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLALDX rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlsld rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLSLD rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+def : Thumb2DSPPat<(ARMSmlsldx rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi),
+ (t2SMLSLDX rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi)>;
+
//===----------------------------------------------------------------------===//
// Division Instructions.
// Signed and unsigned division on v7-M
@@ -4640,6 +4706,19 @@ def : t2InstSubst<"and${s}${p} $Rd, $Rn, $imm",
def : t2InstSubst<"and${s}${p} $Rdn, $imm",
(t2BICri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
pred:$p, cc_out:$s)>;
+// And ORR <--> ORN
+def : t2InstSubst<"orn${s}${p} $Rd, $Rn, $imm",
+ (t2ORRri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orn${s}${p} $Rdn, $imm",
+ (t2ORRri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orr${s}${p} $Rd, $Rn, $imm",
+ (t2ORNri rGPR:$Rd, rGPR:$Rn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
+def : t2InstSubst<"orr${s}${p} $Rdn, $imm",
+ (t2ORNri rGPR:$Rdn, rGPR:$Rdn, t2_so_imm_not:$imm,
+ pred:$p, cc_out:$s)>;
// Likewise, "add Rd, t2_so_imm_neg" -> sub
def : t2InstSubst<"add${s}${p} $Rd, $Rn, $imm",
(t2SUBri GPRnopc:$Rd, GPRnopc:$Rn, t2_so_imm_neg:$imm,
diff --git a/lib/Target/ARM/ARMRegisterBankInfo.cpp b/lib/Target/ARM/ARMRegisterBankInfo.cpp
index 7325817d446b..13a32211f88c 100644
--- a/lib/Target/ARM/ARMRegisterBankInfo.cpp
+++ b/lib/Target/ARM/ARMRegisterBankInfo.cpp
@@ -196,14 +196,14 @@ const RegisterBank &ARMRegisterBankInfo::getRegBankFromRegClass(
llvm_unreachable("Switch should handle all register classes");
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
auto Opc = MI.getOpcode();
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -258,7 +258,7 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
if (Ty.getSizeInBits() != 64 || Ty1.getSizeInBits() != 32 ||
Ty2.getSizeInBits() != 32)
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
OperandsMapping =
getOperandsMapping({&ARM::ValueMappings[ARM::DPR3OpsIdx],
&ARM::ValueMappings[ARM::GPR3OpsIdx], nullptr,
@@ -271,14 +271,14 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
LLT Ty1 = MRI.getType(MI.getOperand(1).getReg());
if (Ty.getSizeInBits() != 32 || Ty1.getSizeInBits() != 64 ||
MI.getOperand(2).getImm() % 32 != 0)
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
OperandsMapping = getOperandsMapping({&ARM::ValueMappings[ARM::GPR3OpsIdx],
&ARM::ValueMappings[ARM::DPR3OpsIdx],
nullptr, nullptr});
break;
}
default:
- return InstructionMapping{};
+ return getInvalidInstructionMapping();
}
#ifndef NDEBUG
@@ -292,6 +292,6 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
#endif
- return InstructionMapping{DefaultMappingID, /*Cost=*/1, OperandsMapping,
- NumOperands};
+ return getInstructionMapping(DefaultMappingID, /*Cost=*/1, OperandsMapping,
+ NumOperands);
}
diff --git a/lib/Target/ARM/ARMRegisterBankInfo.h b/lib/Target/ARM/ARMRegisterBankInfo.h
index 5222c1e6389f..9650b358f319 100644
--- a/lib/Target/ARM/ARMRegisterBankInfo.h
+++ b/lib/Target/ARM/ARMRegisterBankInfo.h
@@ -36,7 +36,8 @@ public:
const RegisterBank &
getRegBankFromRegClass(const TargetRegisterClass &RC) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // End llvm namespace.
#endif
diff --git a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
index 2b0cd461df7a..4a943187ab6d 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
@@ -38,6 +38,7 @@ const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr, SMLoc
void ARMTargetStreamer::emitCurrentConstantPool() {
ConstantPools->emitForCurrentSection(Streamer);
+ ConstantPools->clearCacheForCurrentSection(Streamer);
}
// finish() - write out any non-empty assembler constant pools.
diff --git a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
index 1f355171ebd3..80357a63a4e1 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
@@ -70,7 +70,7 @@ void BPFAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
unsigned Size = Fixup.getKind() == FK_Data_4 ? 4 : 8;
for (unsigned i = 0; i != Size; ++i) {
- unsigned Idx = IsLittleEndian ? i : Size - i;
+ unsigned Idx = IsLittleEndian ? i : Size - i - 1;
Data[Fixup.getOffset() + Idx] = uint8_t(Value >> (i * 8));
}
} else {
diff --git a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
index 3396ddbe4fa6..87c212b6163f 100644
--- a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
+++ b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
@@ -553,7 +553,7 @@ static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo,
using namespace Hexagon;
static const MCPhysReg CtrlRegDecoderTable[] = {
/* 0 */ SA0, LC0, SA1, LC1,
- /* 4 */ P3_0, C5, C6, C7,
+ /* 4 */ P3_0, C5, M0, M1,
/* 8 */ USR, PC, UGP, GP,
/* 12 */ CS0, CS1, UPCYCLELO, UPCYCLEHI,
/* 16 */ FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI,
diff --git a/lib/Target/Hexagon/HexagonDepIICHVX.td b/lib/Target/Hexagon/HexagonDepIICHVX.td
new file mode 100644
index 000000000000..1c1788264c66
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonDepIICHVX.td
@@ -0,0 +1,1143 @@
+//===--- HexagonDepIICHVX.td ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def tc_0317c6ca : InstrItinClass;
+def tc_1b93bdc6 : InstrItinClass;
+def tc_2171ebae : InstrItinClass;
+def tc_28978789 : InstrItinClass;
+def tc_316c637c : InstrItinClass;
+def tc_354299ad : InstrItinClass;
+def tc_35e92f8e : InstrItinClass;
+def tc_38208312 : InstrItinClass;
+def tc_4105d6b5 : InstrItinClass;
+def tc_41f4b64e : InstrItinClass;
+def tc_41f99e1c : InstrItinClass;
+def tc_45453b98 : InstrItinClass;
+def tc_4e2a5159 : InstrItinClass;
+def tc_4fd8566e : InstrItinClass;
+def tc_51cd3aab : InstrItinClass;
+def tc_5a9fc4ec : InstrItinClass;
+def tc_5c120602 : InstrItinClass;
+def tc_5cbf490b : InstrItinClass;
+def tc_644584f8 : InstrItinClass;
+def tc_69b6dd20 : InstrItinClass;
+def tc_6b78cf13 : InstrItinClass;
+def tc_6fd9ad30 : InstrItinClass;
+def tc_71337255 : InstrItinClass;
+def tc_72ad7b54 : InstrItinClass;
+def tc_77a4c701 : InstrItinClass;
+def tc_7c3f55c4 : InstrItinClass;
+def tc_7e9f581b : InstrItinClass;
+def tc_7fa82b08 : InstrItinClass;
+def tc_7fa8b40f : InstrItinClass;
+def tc_85d237e3 : InstrItinClass;
+def tc_8b6a873f : InstrItinClass;
+def tc_908a4c8c : InstrItinClass;
+def tc_9311da3f : InstrItinClass;
+def tc_9777e6bf : InstrItinClass;
+def tc_97c165b9 : InstrItinClass;
+def tc_99093773 : InstrItinClass;
+def tc_9b9642a1 : InstrItinClass;
+def tc_9c267309 : InstrItinClass;
+def tc_a3127e12 : InstrItinClass;
+def tc_a4c9df3b : InstrItinClass;
+def tc_aedb9f9e : InstrItinClass;
+def tc_b06ab583 : InstrItinClass;
+def tc_b712833a : InstrItinClass;
+def tc_b77635b4 : InstrItinClass;
+def tc_bbaf280e : InstrItinClass;
+def tc_bf142ae2 : InstrItinClass;
+def tc_c00bf9c9 : InstrItinClass;
+def tc_c4b515c5 : InstrItinClass;
+def tc_cbf6d1dc : InstrItinClass;
+def tc_cedf314b : InstrItinClass;
+def tc_d2cb81ea : InstrItinClass;
+def tc_d5090f3e : InstrItinClass;
+def tc_d642eff3 : InstrItinClass;
+def tc_d725e5b0 : InstrItinClass;
+def tc_d7bea0ec : InstrItinClass;
+def tc_d98f4d63 : InstrItinClass;
+def tc_da979fb3 : InstrItinClass;
+def tc_db5b9e2f : InstrItinClass;
+def tc_e172d86a : InstrItinClass;
+def tc_e231aa4f : InstrItinClass;
+def tc_e3748cdf : InstrItinClass;
+def tc_e5053c8f : InstrItinClass;
+def tc_e6299d16 : InstrItinClass;
+def tc_eb669007 : InstrItinClass;
+def tc_eda67dcd : InstrItinClass;
+def tc_f3fc3f83 : InstrItinClass;
+
+class DepHVXItinV55 {
+ list<InstrItinData> DepHVXItinV55_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
+
+class DepHVXItinV60 {
+ list<InstrItinData> DepHVXItinV60_list = [
+ InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+ [HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_71337255, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+ [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+ [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9b9642a1, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+ [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+ [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [3],
+ [HVX_FWD]>,
+
+ InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_SHIFT]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [SLOT1], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLSHF]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+ [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+ [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+ [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+ [InstrStage<1, [SLOT0], 0>,
+ InstrStage<1, [CVI_ST], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+ [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_ALL]>], [],
+ []>,
+
+ InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5],
+ [HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+ [InstrStage<1, [SLOT0, SLOT1], 0>,
+ InstrStage<1, [CVI_LD], 0>,
+ InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+ [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+ [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+ InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+ [HVX_FWD, HVX_FWD, HVX_FWD]>
+ ];
+}
+
+class DepHVXItinV62 { // Generated itinerary table for HVX (CVI_* vector units) on the V62 scheduling model
+  list<InstrItinData> DepHVXItinV62_list = [ // per tc_* class: issue stages, then [operand cycles], then matching [forwarding kinds]; the /*...*/ tag names slots/units
+    InstrItinData <tc_0317c6ca, /*SLOT0,STORE,VA*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 2, 1, 2, 7],
+      [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_1b93bdc6, /*SLOT0,STORE*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST]>], [1, 2, 5],
+      [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_2171ebae, /*SLOT0123,VA_DV*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 2, 7, 7],
+      [HVX_FWD, Hex_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_28978789, /*SLOT0123,4SLOT*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_ALL]>], [3, 2],
+      [HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_316c637c, /*SLOT0123,VA_DV*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7, 7],
+      [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_354299ad, /*SLOT0,NOSLOT1,STORE,VP*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [SLOT1], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_XLANE]>], [1, 2, 5],
+      [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_35e92f8e, /*SLOT0,NOSLOT1,LOAD,VP*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [SLOT1], 0>,
+       InstrStage<1, [CVI_LD], 0>,
+       InstrStage<1, [CVI_XLANE]>], [9, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_38208312, /*SLOT01,LOAD*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD]>], [9, 3, 2, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_4105d6b5, /*SLOT0123,VP*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLANE]>], [9, 2],
+      [HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_41f4b64e, /*SLOT0123,VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_SHIFT]>], [9, 5, 2],
+      [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_41f99e1c, /*SLOT23,VX_DV*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_45453b98, /*SLOT0123,VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_SHIFT]>], [9, 5, 5],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_4e2a5159, /*SLOT0123,VP_VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLSHF]>], [9, 5, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_4fd8566e, /*SLOT0,NOSLOT1,LOAD,VP*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [SLOT1], 0>,
+       InstrStage<1, [CVI_LD], 0>,
+       InstrStage<1, [CVI_XLANE]>], [9, 3, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_51cd3aab, /*SLOT01,LOAD*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD]>], [9, 2, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_5a9fc4ec, /*SLOT0123,VA*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 9, 7, 7, 7],
+      [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_5c120602, /*SLOT0123,VP_VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLSHF]>], [9, 9, 5, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_5cbf490b, /*SLOT01,LOAD,VA*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_644584f8, /*SLOT0123,VA_DV*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7],
+      [HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_69b6dd20, /*SLOT23,VX*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 2],
+      [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_6b78cf13, /*SLOT23,VX*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 2],
+      [HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_6fd9ad30, /*SLOT0,NOSLOT1,STORE,VP*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [SLOT1], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_XLANE]>], [3, 2, 1, 2, 5],
+      [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_71337255, /*SLOT0123,VA*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7],
+      [HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_72ad7b54, /*SLOT0123,VP_VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLSHF]>], [9, 7, 5],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_77a4c701, /*SLOT01,LOAD*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD]>], [9, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_7c3f55c4, /*SLOT23,VX_DV*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01]>], [9, 5, 2],
+      [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_7e9f581b, /*SLOT23,VX_DV*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01]>], [9, 5, 2, 2],
+      [HVX_FWD, HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_7fa82b08, /*SLOT0,NOSLOT1,STORE,VP*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [SLOT1], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_XLANE]>], [3, 1, 2, 5],
+      [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_7fa8b40f, /*SLOT0123,VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_SHIFT]>], [9, 5, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_85d237e3, /*SLOT0,STORE,VA*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [2, 1, 2, 7],
+      [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_8b6a873f, /*SLOT0,STORE*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST]>], [3, 2, 1, 2, 5],
+      [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_908a4c8c, /*SLOT23,VX*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 5, 5],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_9311da3f, /*SLOT23,VX*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 7, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_9777e6bf, /*SLOT0,VA*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [4, 7, 1],
+      [Hex_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_97c165b9, /*SLOT0123,VA_DV*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01, CVI_XLSHF]>], [9, 7, 7],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_99093773, /*SLOT0,STORE,VA*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 7, 1, 2, 7],
+      [Hex_FWD, HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_9b9642a1, /*SLOT0123,VA*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_9c267309, /*SLOT01,LOAD*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD]>], [9, 3, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_a3127e12, /*SLOT0123,VA*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7, 7],
+      [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_a4c9df3b, /*SLOT0,STORE,VA*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [3, 1, 2, 7],
+      [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_aedb9f9e, /*SLOT0,STORE,VA*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [7, 1, 2, 7],
+      [HVX_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_b06ab583, /*SLOT0123,VA*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 2, 7],
+      [HVX_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_b712833a, /*SLOT01,LOAD,VA*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_b77635b4, /*SLOT0123,4SLOT*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_ALL]>], [2],
+      [Hex_FWD]>,
+
+    InstrItinData <tc_bbaf280e, /*SLOT0123,VA*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 7, 7],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_bf142ae2, /*SLOT0123,VP*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLANE]>], [9, 5, 2],
+      [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_c00bf9c9, /*SLOT0123,VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_SHIFT]>], [9, 7, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_c4b515c5, /*SLOT0123,VP*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLANE]>], [9, 5, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_cbf6d1dc, /*SLOT0123,VP_VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLSHF]>], [9, 7, 5, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_cedf314b, /*SLOT0123,4SLOT*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_ALL]>], [3],
+      [HVX_FWD]>,
+
+    InstrItinData <tc_d2cb81ea, /*SLOT0123,VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_SHIFT]>], [9, 5],
+      [HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_d5090f3e, /*SLOT0,STORE*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST]>], [2, 1, 2, 5],
+      [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_d642eff3, /*SLOT0,NOSLOT1,STORE,VP*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [SLOT1], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_XLANE]>], [2, 1, 2, 5],
+      [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_d725e5b0, /*SLOT23,VX*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_d7bea0ec, /*SLOT0123,VP_VS*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLSHF]>], [9, 5],
+      [HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_d98f4d63, /*SLOT23,VX_DV*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 2],
+      [HVX_FWD, HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_da979fb3, /*SLOT01,LOAD,VA*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 2, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_db5b9e2f, /*SLOT0,STORE*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST]>], [3, 1, 2, 5],
+      [Hex_FWD, Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_e172d86a, /*SLOT23,VX_DV*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01]>], [9, 7, 5, 5],
+      [HVX_FWD, HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_e231aa4f, /*SLOT23,VX*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1]>], [9, 7, 2],
+      [HVX_FWD, HVX_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_e3748cdf, /*SLOT0,STORE,VA*/
+      [InstrStage<1, [SLOT0], 0>,
+       InstrStage<1, [CVI_ST], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [1, 2, 7],
+      [Hex_FWD, Hex_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_e5053c8f, /*SLOT0123,4SLOT*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_ALL]>], [], // empty operand-cycle and forwarding lists: no operands modeled for this class
+      []>,
+
+    InstrItinData <tc_e6299d16, /*SLOT0123,VP*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLANE]>], [9, 5],
+      [HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_eb669007, /*SLOT01,LOAD,VA*/
+      [InstrStage<1, [SLOT0, SLOT1], 0>,
+       InstrStage<1, [CVI_LD], 0>,
+       InstrStage<1, [CVI_MPY0, CVI_MPY1, CVI_SHIFT, CVI_XLANE]>], [9, 3, 1, 2],
+      [HVX_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+    InstrItinData <tc_eda67dcd, /*SLOT23,VX_DV*/
+      [InstrStage<1, [SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_MPY01]>], [9, 5, 5],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>,
+
+    InstrItinData <tc_f3fc3f83, /*SLOT0123,VP*/
+      [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3], 0>,
+       InstrStage<1, [CVI_XLANE]>], [9, 5, 5],
+      [HVX_FWD, HVX_FWD, HVX_FWD]>
+  ];
+}
diff --git a/lib/Target/Hexagon/HexagonDepIICScalar.td b/lib/Target/Hexagon/HexagonDepIICScalar.td
new file mode 100644
index 000000000000..261778bda724
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonDepIICScalar.td
@@ -0,0 +1,2504 @@
+//===--- HexagonDepIICScalar.td -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def tc_049dfb74 : InstrItinClass; // opaque scalar itinerary-class tags; stage/latency data is bound per CPU model (e.g. DepScalarItinV4 below)
+def tc_0767081f : InstrItinClass;
+def tc_07ac815d : InstrItinClass;
+def tc_090485bb : InstrItinClass;
+def tc_09c86199 : InstrItinClass;
+def tc_09faec3b : InstrItinClass;
+def tc_0cb867f2 : InstrItinClass;
+def tc_1000eb10 : InstrItinClass;
+def tc_128719e8 : InstrItinClass;
+def tc_136c4786 : InstrItinClass;
+def tc_14da557c : InstrItinClass;
+def tc_1b6011fb : InstrItinClass;
+def tc_1b834fe7 : InstrItinClass;
+def tc_1e062b18 : InstrItinClass;
+def tc_1e69aa99 : InstrItinClass;
+def tc_1f9668cc : InstrItinClass;
+def tc_1fe8323c : InstrItinClass;
+def tc_20a8e109 : InstrItinClass;
+def tc_210b2456 : InstrItinClass;
+def tc_251c87b2 : InstrItinClass;
+def tc_261d9b78 : InstrItinClass;
+def tc_28d296df : InstrItinClass;
+def tc_29c14515 : InstrItinClass;
+def tc_2aaab1e0 : InstrItinClass;
+def tc_2c8fe5ae : InstrItinClass;
+def tc_2d1e6f5c : InstrItinClass;
+def tc_2e55aa16 : InstrItinClass;
+def tc_30665cb0 : InstrItinClass;
+def tc_336e698c : InstrItinClass;
+def tc_34e882a4 : InstrItinClass;
+def tc_35fb9d13 : InstrItinClass;
+def tc_37326008 : InstrItinClass;
+def tc_3993c58b : InstrItinClass;
+def tc_3b4892c6 : InstrItinClass;
+def tc_3bea1824 : InstrItinClass;
+def tc_3c10f809 : InstrItinClass;
+def tc_3d905451 : InstrItinClass;
+def tc_3e61d314 : InstrItinClass;
+def tc_3eab77bd : InstrItinClass;
+def tc_43068634 : InstrItinClass;
+def tc_45631a8d : InstrItinClass;
+def tc_47ab9233 : InstrItinClass;
+def tc_47f0b7ad : InstrItinClass;
+def tc_485bb57c : InstrItinClass;
+def tc_4997da4a : InstrItinClass;
+def tc_511f28f6 : InstrItinClass;
+def tc_537e2013 : InstrItinClass;
+def tc_53ee6546 : InstrItinClass;
+def tc_548f402d : InstrItinClass;
+def tc_5625c6c1 : InstrItinClass;
+def tc_580a779c : InstrItinClass;
+def tc_583510c7 : InstrItinClass;
+def tc_5d806107 : InstrItinClass;
+def tc_5fa2857c : InstrItinClass;
+def tc_5fe9fcd0 : InstrItinClass;
+def tc_6264c5e0 : InstrItinClass;
+def tc_639d93ee : InstrItinClass;
+def tc_63cd9d2d : InstrItinClass;
+def tc_65dc7cc4 : InstrItinClass;
+def tc_69bb508b : InstrItinClass;
+def tc_6c52d277 : InstrItinClass;
+def tc_6c576d46 : InstrItinClass;
+def tc_70cabf66 : InstrItinClass;
+def tc_7639d4b0 : InstrItinClass;
+def tc_7675c0e9 : InstrItinClass;
+def tc_76c4c5ef : InstrItinClass;
+def tc_77781686 : InstrItinClass;
+def tc_78b3c689 : InstrItinClass;
+def tc_7986ba30 : InstrItinClass;
+def tc_7bc567a7 : InstrItinClass;
+def tc_7c2dcd4d : InstrItinClass;
+def tc_7ca2ea10 : InstrItinClass;
+def tc_7d01cbdc : InstrItinClass;
+def tc_7d9a56cd : InstrItinClass;
+def tc_81a23d44 : InstrItinClass;
+def tc_821c4233 : InstrItinClass;
+def tc_82f0f122 : InstrItinClass;
+def tc_84630363 : InstrItinClass;
+def tc_86442910 : InstrItinClass;
+def tc_87601822 : InstrItinClass;
+def tc_88fa2da6 : InstrItinClass;
+def tc_8c8041e6 : InstrItinClass;
+def tc_8cb685d9 : InstrItinClass;
+def tc_8def9c57 : InstrItinClass;
+def tc_8f0a6bad : InstrItinClass;
+def tc_8fab9ac3 : InstrItinClass;
+def tc_92d1833c : InstrItinClass;
+def tc_94e6ffd9 : InstrItinClass;
+def tc_95c54f8b : InstrItinClass;
+def tc_9a13af9d : InstrItinClass;
+def tc_9b73d261 : InstrItinClass;
+def tc_9c18c9a5 : InstrItinClass;
+def tc_9c68db63 : InstrItinClass;
+def tc_9ce7a5ab : InstrItinClass;
+def tc_9da3628f : InstrItinClass;
+def tc_9dafb7d3 : InstrItinClass;
+def tc_9df8b0dc : InstrItinClass;
+def tc_9e86015f : InstrItinClass;
+def tc_9f518242 : InstrItinClass;
+def tc_a12a5971 : InstrItinClass;
+def tc_a1fb80e1 : InstrItinClass;
+def tc_a333d2a9 : InstrItinClass;
+def tc_a4567c39 : InstrItinClass;
+def tc_a87879e8 : InstrItinClass;
+def tc_a9c993d9 : InstrItinClass;
+def tc_aad55963 : InstrItinClass;
+def tc_ab1b5e74 : InstrItinClass;
+def tc_ae0722f7 : InstrItinClass;
+def tc_ae2c2dc2 : InstrItinClass;
+def tc_ae762521 : InstrItinClass;
+def tc_b08b653e : InstrItinClass;
+def tc_b08be45e : InstrItinClass;
+def tc_b0f50e3c : InstrItinClass;
+def tc_b189ad4c : InstrItinClass;
+def tc_b324366f : InstrItinClass;
+def tc_b5bfaa60 : InstrItinClass;
+def tc_b5f5a094 : InstrItinClass;
+def tc_b86c7e8b : InstrItinClass;
+def tc_baccf077 : InstrItinClass;
+def tc_bc5561d8 : InstrItinClass;
+def tc_bcf0e36e : InstrItinClass;
+def tc_bd16579e : InstrItinClass;
+def tc_be995eaf : InstrItinClass;
+def tc_bf6fa601 : InstrItinClass;
+def tc_c0cd91a8 : InstrItinClass;
+def tc_c14739d5 : InstrItinClass;
+def tc_c1dbc916 : InstrItinClass;
+def tc_c58f771a : InstrItinClass;
+def tc_c85212ca : InstrItinClass;
+def tc_c8f9a6f6 : InstrItinClass;
+def tc_ca280e8b : InstrItinClass;
+def tc_cbe45117 : InstrItinClass;
+def tc_cd321066 : InstrItinClass;
+def tc_d108a090 : InstrItinClass;
+def tc_d1b5a4b6 : InstrItinClass;
+def tc_d2609065 : InstrItinClass;
+def tc_d267fa19 : InstrItinClass;
+def tc_d2a33af5 : InstrItinClass;
+def tc_d63b71d1 : InstrItinClass;
+def tc_d6a805a8 : InstrItinClass;
+def tc_d95f4e98 : InstrItinClass;
+def tc_da79106e : InstrItinClass;
+def tc_dbe218dd : InstrItinClass;
+def tc_dcfee7ae : InstrItinClass;
+def tc_e17ce9ad : InstrItinClass;
+def tc_e2480a7f : InstrItinClass;
+def tc_e2c08bb4 : InstrItinClass;
+def tc_e2c31426 : InstrItinClass;
+def tc_e578178f : InstrItinClass;
+def tc_e836c161 : InstrItinClass;
+def tc_e8c7a357 : InstrItinClass;
+def tc_eb07ef6f : InstrItinClass;
+def tc_ecfaae86 : InstrItinClass;
+def tc_ef0ebaaa : InstrItinClass;
+def tc_ef2676fd : InstrItinClass;
+def tc_f027ebe9 : InstrItinClass;
+def tc_f055fbb6 : InstrItinClass;
+def tc_f1240c08 : InstrItinClass;
+def tc_f16d5b17 : InstrItinClass;
+def tc_f1aa2cdb : InstrItinClass;
+def tc_f26aa619 : InstrItinClass;
+def tc_f4608adc : InstrItinClass;
+def tc_faab1248 : InstrItinClass;
+def tc_fcee8723 : InstrItinClass;
+def tc_feb4974b : InstrItinClass;
+
+class DepScalarItinV4 {
+ list<InstrItinData> DepScalarItinV4_list = [
+ InstrItinData <tc_049dfb74, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_0767081f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_07ac815d, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_090485bb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09c86199, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09faec3b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_0cb867f2, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_1000eb10, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_128719e8, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_136c4786, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_14da557c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1b6011fb, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1b834fe7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e062b18, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e69aa99, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1f9668cc, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_1fe8323c, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_20a8e109, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_210b2456, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_251c87b2, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_261d9b78, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_28d296df, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_29c14515, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2aaab1e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2c8fe5ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2d1e6f5c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2e55aa16, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_30665cb0, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_336e698c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_34e882a4, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_35fb9d13, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_37326008, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3993c58b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3b4892c6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_3bea1824, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3c10f809, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3d905451, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_3e61d314, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3eab77bd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_43068634, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_45631a8d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_47ab9233, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_47f0b7ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_485bb57c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_4997da4a, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_511f28f6, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_537e2013, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_53ee6546, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_548f402d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5625c6c1, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_580a779c, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_583510c7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5d806107, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fa2857c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fe9fcd0, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6264c5e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_639d93ee, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_63cd9d2d, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_65dc7cc4, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_69bb508b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6c52d277, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_6c576d46, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_70cabf66, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7639d4b0, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7675c0e9, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_76c4c5ef, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_77781686, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_78b3c689, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7986ba30, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7bc567a7, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7c2dcd4d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7ca2ea10, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7d01cbdc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7d9a56cd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_81a23d44, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_821c4233, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_82f0f122, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_84630363, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_86442910, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_87601822, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_88fa2da6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8c8041e6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8cb685d9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8def9c57, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_8f0a6bad, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_8fab9ac3, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_92d1833c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_94e6ffd9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_95c54f8b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9a13af9d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9b73d261, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9c18c9a5, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9c68db63, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9ce7a5ab, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9da3628f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9dafb7d3, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9df8b0dc, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9e86015f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9f518242, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a12a5971, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a1fb80e1, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_a333d2a9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a4567c39, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_a87879e8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a9c993d9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_aad55963, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ab1b5e74, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae0722f7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae2c2dc2, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae762521, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b08b653e, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b08be45e, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b0f50e3c, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b189ad4c, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b324366f, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_b5bfaa60, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b5f5a094, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b86c7e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_baccf077, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_bc5561d8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_bcf0e36e, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_bd16579e, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_be995eaf, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_bf6fa601, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c0cd91a8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c14739d5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c1dbc916, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c58f771a, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c85212ca, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c8f9a6f6, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ca280e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_cbe45117, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_cd321066, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d108a090, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d1b5a4b6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d2609065, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d267fa19, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_d2a33af5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d63b71d1, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d6a805a8, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_d95f4e98, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_da79106e, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dbe218dd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dcfee7ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e17ce9ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2480a7f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e2c08bb4, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2c31426, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e578178f, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_e836c161, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e8c7a357, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_eb07ef6f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ecfaae86, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_ef0ebaaa, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ef2676fd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f027ebe9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f055fbb6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_f1240c08, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f16d5b17, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f1aa2cdb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f26aa619, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_f4608adc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_faab1248, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_fcee8723, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_feb4974b, [InstrStage<1, [SLOT3]>]> ];
+}
+
+class DepScalarItinV5 {
+ list<InstrItinData> DepScalarItinV5_list = [
+ InstrItinData <tc_049dfb74, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_0767081f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_07ac815d, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_090485bb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09c86199, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_09faec3b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_0cb867f2, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_1000eb10, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_128719e8, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_136c4786, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_14da557c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1b6011fb, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1b834fe7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e062b18, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_1e69aa99, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_1f9668cc, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_1fe8323c, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_20a8e109, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_210b2456, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_251c87b2, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_261d9b78, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_28d296df, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_29c14515, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2aaab1e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2c8fe5ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_2d1e6f5c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_2e55aa16, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_30665cb0, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_336e698c, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_34e882a4, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_35fb9d13, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_37326008, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3993c58b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3b4892c6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_3bea1824, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3c10f809, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_3d905451, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_3e61d314, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_3eab77bd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_43068634, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_45631a8d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_47ab9233, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_47f0b7ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_485bb57c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_4997da4a, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_511f28f6, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_537e2013, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_53ee6546, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_548f402d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5625c6c1, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_580a779c, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_583510c7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5d806107, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fa2857c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_5fe9fcd0, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6264c5e0, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_639d93ee, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_63cd9d2d, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_65dc7cc4, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_69bb508b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_6c52d277, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_6c576d46, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_70cabf66, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7639d4b0, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7675c0e9, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_76c4c5ef, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_77781686, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_78b3c689, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7986ba30, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7bc567a7, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7c2dcd4d, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_7ca2ea10, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_7d01cbdc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_7d9a56cd, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_81a23d44, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_821c4233, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_82f0f122, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_84630363, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_86442910, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_87601822, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_88fa2da6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8c8041e6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8cb685d9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_8def9c57, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_8f0a6bad, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_8fab9ac3, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_92d1833c, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_94e6ffd9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_95c54f8b, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9a13af9d, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9b73d261, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9c18c9a5, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9c68db63, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9ce7a5ab, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9da3628f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9dafb7d3, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_9df8b0dc, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_9e86015f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_9f518242, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a12a5971, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a1fb80e1, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_a333d2a9, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a4567c39, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_a87879e8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_a9c993d9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_aad55963, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ab1b5e74, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae0722f7, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae2c2dc2, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ae762521, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b08b653e, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b08be45e, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b0f50e3c, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b189ad4c, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_b324366f, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_b5bfaa60, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_b5f5a094, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_b86c7e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_baccf077, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_bc5561d8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_bcf0e36e, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_bd16579e, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_be995eaf, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_bf6fa601, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c0cd91a8, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c14739d5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c1dbc916, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c58f771a, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_c85212ca, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_c8f9a6f6, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ca280e8b, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_cbe45117, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_cd321066, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d108a090, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d1b5a4b6, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d2609065, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d267fa19, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_d2a33af5, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_d63b71d1, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_d6a805a8, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_d95f4e98, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_da79106e, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dbe218dd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_dcfee7ae, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e17ce9ad, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2480a7f, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_e2c08bb4, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e2c31426, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e578178f, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_e836c161, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_e8c7a357, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_eb07ef6f, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_ecfaae86, [InstrStage<1, [SLOT2]>]>,
+ InstrItinData <tc_ef0ebaaa, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_ef2676fd, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f027ebe9, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_f055fbb6, [InstrStage<1, [SLOT3]>]>,
+ InstrItinData <tc_f1240c08, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f16d5b17, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f1aa2cdb, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_f26aa619, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_f4608adc, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData <tc_faab1248, [InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData <tc_fcee8723, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData <tc_feb4974b, [InstrStage<1, [SLOT3]>]> ];
+}
+
+class DepScalarItinV55 {
+ list<InstrItinData> DepScalarItinV55_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 3, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
+
+class DepScalarItinV60 {
+ list<InstrItinData> DepScalarItinV60_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [2, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 1, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_3stall*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
+
+class DepScalarItinV62 {
+ list<InstrItinData> DepScalarItinV62_list = [
+ InstrItinData <tc_049dfb74, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_0767081f, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_07ac815d, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_090485bb, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09c86199, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_09faec3b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_0cb867f2, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1000eb10, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_128719e8, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_136c4786, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_14da557c, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b6011fb, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1b834fe7, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e062b18, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1e69aa99, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1f9668cc, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_1fe8323c, /*tc_2*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_20a8e109, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_210b2456, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_251c87b2, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_261d9b78, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_28d296df, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_29c14515, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2aaab1e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2c8fe5ae, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2d1e6f5c, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_2e55aa16, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_30665cb0, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_336e698c, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_34e882a4, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_35fb9d13, /*tc_2early*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_37326008, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3993c58b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3b4892c6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3bea1824, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3c10f809, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3d905451, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3e61d314, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [2, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_3eab77bd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_43068634, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_45631a8d, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47ab9233, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_47f0b7ad, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_485bb57c, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_4997da4a, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_511f28f6, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_537e2013, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_53ee6546, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_548f402d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5625c6c1, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_580a779c, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_583510c7, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5d806107, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fa2857c, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_5fe9fcd0, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6264c5e0, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_639d93ee, /*tc_3*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_63cd9d2d, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_65dc7cc4, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_69bb508b, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c52d277, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_6c576d46, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_70cabf66, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7639d4b0, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7675c0e9, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_76c4c5ef, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_77781686, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_78b3c689, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7986ba30, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7bc567a7, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7c2dcd4d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_7ca2ea10, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d01cbdc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_7d9a56cd, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_81a23d44, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_821c4233, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_82f0f122, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [4, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_84630363, /*tc_3*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_86442910, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [],
+ []>,
+
+ InstrItinData <tc_87601822, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_88fa2da6, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8c8041e6, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8cb685d9, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8def9c57, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8f0a6bad, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_8fab9ac3, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_92d1833c, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_94e6ffd9, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_95c54f8b, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_9a13af9d, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_9b73d261, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c18c9a5, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9c68db63, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9ce7a5ab, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9da3628f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9dafb7d3, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9df8b0dc, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9e86015f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [2, 3],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_9f518242, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a12a5971, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a1fb80e1, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a333d2a9, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_a4567c39, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a87879e8, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_a9c993d9, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_aad55963, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_ab1b5e74, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae0722f7, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 4, 2, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae2c2dc2, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ae762521, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b08b653e, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b08be45e, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b0f50e3c, /*tc_2*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b189ad4c, /*tc_3stall*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_b324366f, /*tc_2early*/
+ [InstrStage<1, [SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5bfaa60, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b5f5a094, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_b86c7e8b, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_baccf077, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bc5561d8, /*tc_3x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 1, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bcf0e36e, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_bd16579e, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_be995eaf, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_bf6fa601, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c0cd91a8, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c14739d5, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c1dbc916, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c58f771a, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c85212ca, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_c8f9a6f6, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ca280e8b, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_cbe45117, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_cd321066, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d108a090, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d1b5a4b6, /*tc_1*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d2609065, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d267fa19, /*tc_2early*/
+ [InstrStage<1, [SLOT2]>], [],
+ []>,
+
+ InstrItinData <tc_d2a33af5, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 2, 1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d63b71d1, /*tc_2early*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [3, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d6a805a8, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_d95f4e98, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_da79106e, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dbe218dd, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_dcfee7ae, /*tc_newvjump*/
+ [InstrStage<1, [SLOT0]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e17ce9ad, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2480a7f, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [3, 2, 1, 2, 3],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c08bb4, /*tc_3stall*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 1, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e2c31426, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [],
+ []>,
+
+ InstrItinData <tc_e578178f, /*tc_ld*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [4, 3, 3, 1, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e836c161, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_e8c7a357, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_eb07ef6f, /*tc_2early*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ecfaae86, /*tc_3*/
+ [InstrStage<1, [SLOT2]>], [1],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_ef0ebaaa, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [1, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_ef2676fd, /*tc_st*/
+ [InstrStage<1, [SLOT0]>], [],
+ []>,
+
+ InstrItinData <tc_f027ebe9, /*tc_ld*/
+ [InstrStage<1, [SLOT0]>], [2],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f055fbb6, /*tc_3x*/
+ [InstrStage<1, [SLOT3]>], [2, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1240c08, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f16d5b17, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [3, 2],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f1aa2cdb, /*tc_4x*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [5, 5, 1],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_f26aa619, /*tc_1*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [3],
+ [Hex_FWD]>,
+
+ InstrItinData <tc_f4608adc, /*tc_3stall*/
+ [InstrStage<1, [SLOT0]>], [1, 1],
+ [Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_faab1248, /*tc_2*/
+ [InstrStage<1, [SLOT2, SLOT3]>], [4, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_fcee8723, /*tc_st*/
+ [InstrStage<1, [SLOT0, SLOT1]>], [1, 2, 2],
+ [Hex_FWD, Hex_FWD, Hex_FWD]>,
+
+ InstrItinData <tc_feb4974b, /*tc_3stall*/
+ [InstrStage<1, [SLOT3]>], [2, 2],
+ [Hex_FWD, Hex_FWD]>
+ ];
+}
diff --git a/lib/Target/Hexagon/HexagonDepITypes.h b/lib/Target/Hexagon/HexagonDepITypes.h
index 331edaf5831d..be831b9501ea 100644
--- a/lib/Target/Hexagon/HexagonDepITypes.h
+++ b/lib/Target/Hexagon/HexagonDepITypes.h
@@ -15,38 +15,38 @@ enum Type {
TypeALU32_ADDI = 2,
TypeALU64 = 3,
TypeCJ = 4,
- TypeCOPROC_VMEM = 5,
- TypeCR = 7,
+ TypeCR = 6,
TypeCVI_HIST = 10,
TypeCVI_VA = 16,
TypeCVI_VA_DV = 17,
TypeCVI_VINLANESAT = 18,
- TypeCVI_VM_LD = 20,
- TypeCVI_VM_NEW_ST = 21,
- TypeCVI_VM_ST = 22,
- TypeCVI_VM_STU = 23,
- TypeCVI_VM_TMP_LD = 24,
- TypeCVI_VM_VP_LDU = 25,
- TypeCVI_VP = 26,
- TypeCVI_VP_VS = 27,
- TypeCVI_VS = 28,
- TypeCVI_VX = 30,
- TypeCVI_VX_DV = 31,
- TypeDUPLEX = 32,
- TypeENDLOOP = 33,
- TypeEXTENDER = 34,
- TypeJ = 35,
- TypeLD = 36,
- TypeM = 37,
- TypeMAPPING = 38,
- TypeNCJ = 39,
- TypePSEUDO = 40,
- TypeST = 41,
- TypeSUBINSN = 42,
- TypeS_2op = 43,
- TypeS_3op = 44,
- TypeV2LDST = 47,
- TypeV4LDST = 48
+ TypeCVI_VM_LD = 19,
+ TypeCVI_VM_NEW_ST = 20,
+ TypeCVI_VM_ST = 21,
+ TypeCVI_VM_STU = 22,
+ TypeCVI_VM_TMP_LD = 23,
+ TypeCVI_VM_VP_LDU = 24,
+ TypeCVI_VP = 25,
+ TypeCVI_VP_VS = 26,
+ TypeCVI_VS = 27,
+ TypeCVI_VX = 29,
+ TypeCVI_VX_DV = 30,
+ TypeCVI_VX_LATE = 31,
+ TypeDUPLEX = 33,
+ TypeENDLOOP = 34,
+ TypeEXTENDER = 35,
+ TypeJ = 36,
+ TypeLD = 37,
+ TypeM = 38,
+ TypeMAPPING = 39,
+ TypeNCJ = 40,
+ TypePSEUDO = 41,
+ TypeST = 42,
+ TypeSUBINSN = 43,
+ TypeS_2op = 44,
+ TypeS_3op = 45,
+ TypeV2LDST = 48,
+ TypeV4LDST = 49
};
}
}
diff --git a/lib/Target/Hexagon/HexagonDepITypes.td b/lib/Target/Hexagon/HexagonDepITypes.td
index b35f7ba6d2ab..ac1989e4dd82 100644
--- a/lib/Target/Hexagon/HexagonDepITypes.td
+++ b/lib/Target/Hexagon/HexagonDepITypes.td
@@ -13,35 +13,35 @@ def TypeALU32_3op : IType<1>;
def TypeALU32_ADDI : IType<2>;
def TypeALU64 : IType<3>;
def TypeCJ : IType<4>;
-def TypeCOPROC_VMEM : IType<5>;
-def TypeCR : IType<7>;
+def TypeCR : IType<6>;
def TypeCVI_HIST : IType<10>;
def TypeCVI_VA : IType<16>;
def TypeCVI_VA_DV : IType<17>;
def TypeCVI_VINLANESAT : IType<18>;
-def TypeCVI_VM_LD : IType<20>;
-def TypeCVI_VM_NEW_ST : IType<21>;
-def TypeCVI_VM_ST : IType<22>;
-def TypeCVI_VM_STU : IType<23>;
-def TypeCVI_VM_TMP_LD : IType<24>;
-def TypeCVI_VM_VP_LDU : IType<25>;
-def TypeCVI_VP : IType<26>;
-def TypeCVI_VP_VS : IType<27>;
-def TypeCVI_VS : IType<28>;
-def TypeCVI_VX : IType<30>;
-def TypeCVI_VX_DV : IType<31>;
-def TypeDUPLEX : IType<32>;
-def TypeENDLOOP : IType<33>;
-def TypeEXTENDER : IType<34>;
-def TypeJ : IType<35>;
-def TypeLD : IType<36>;
-def TypeM : IType<37>;
-def TypeMAPPING : IType<38>;
-def TypeNCJ : IType<39>;
-def TypePSEUDO : IType<40>;
-def TypeST : IType<41>;
-def TypeSUBINSN : IType<42>;
-def TypeS_2op : IType<43>;
-def TypeS_3op : IType<44>;
-def TypeV2LDST : IType<47>;
-def TypeV4LDST : IType<48>;
+def TypeCVI_VM_LD : IType<19>;
+def TypeCVI_VM_NEW_ST : IType<20>;
+def TypeCVI_VM_ST : IType<21>;
+def TypeCVI_VM_STU : IType<22>;
+def TypeCVI_VM_TMP_LD : IType<23>;
+def TypeCVI_VM_VP_LDU : IType<24>;
+def TypeCVI_VP : IType<25>;
+def TypeCVI_VP_VS : IType<26>;
+def TypeCVI_VS : IType<27>;
+def TypeCVI_VX : IType<29>;
+def TypeCVI_VX_DV : IType<30>;
+def TypeCVI_VX_LATE : IType<31>;
+def TypeDUPLEX : IType<33>;
+def TypeENDLOOP : IType<34>;
+def TypeEXTENDER : IType<35>;
+def TypeJ : IType<36>;
+def TypeLD : IType<37>;
+def TypeM : IType<38>;
+def TypeMAPPING : IType<39>;
+def TypeNCJ : IType<40>;
+def TypePSEUDO : IType<41>;
+def TypeST : IType<42>;
+def TypeSUBINSN : IType<43>;
+def TypeS_2op : IType<44>;
+def TypeS_3op : IType<45>;
+def TypeV2LDST : IType<48>;
+def TypeV4LDST : IType<49>;
diff --git a/lib/Target/Hexagon/HexagonDepInstrFormats.td b/lib/Target/Hexagon/HexagonDepInstrFormats.td
index d7a99f48803b..1b24be477158 100644
--- a/lib/Target/Hexagon/HexagonDepInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonDepInstrFormats.td
@@ -7,233 +7,140 @@
//
//===----------------------------------------------------------------------===//
-class Enc_12122225 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <3> Qd8;
- let Inst{2-0} = Qd8{2-0};
-}
-class Enc_16626097 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_13397056 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_7315939 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{24-22} = n1{4-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_15275738 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-5} = Ii{9-1};
+class Enc_890909 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12822813 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
bits <2> Pe4;
let Inst{6-5} = Pe4{1-0};
}
-class Enc_10282127 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
+class Enc_527412 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_efaed8 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{8-8} = Ii{0-0};
+}
+class Enc_a568d4 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_14264243 : OpcodeHexagon {
+class Enc_27b757 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+}
+class Enc_5de85f : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{11-8} = Rt16{3-0};
-}
-class Enc_6778937 : OpcodeHexagon {
- bits <5> Rxx32;
- let Inst{20-16} = Rxx32{4-0};
- bits <0> sgp10;
-}
-class Enc_5480539 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
}
-class Enc_11422009 : OpcodeHexagon {
+class Enc_0e41fa : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_16357011 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{8-4} = Vv32{4-0};
- bits <5> Vt32;
- let Inst{13-9} = Vt32{4-0};
- bits <4> Vdd16;
- let Inst{3-0} = Vdd16{3-0};
-}
-class Enc_4975051 : OpcodeHexagon {
- bits <19> Ii;
- let Inst{26-25} = Ii{18-17};
- let Inst{20-16} = Ii{16-12};
- let Inst{13-5} = Ii{11-3};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_14786238 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_15472748 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_6773159 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_12535811 : OpcodeHexagon {
+class Enc_802dc0 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{8-8} = Ii{0-0};
bits <2> Qv4;
let Inst{23-22} = Qv4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
}
-class Enc_14007201 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <8> II;
- let Inst{22-16} = II{7-1};
- let Inst{13-13} = II{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_6b197f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2577026 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{2-0} = Qt8{2-0};
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_1f5d8f : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_7305764 : OpcodeHexagon {
- bits <5> II;
- let Inst{12-8} = II{4-0};
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
+class Enc_51436c : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{23-22} = Ii{15-14};
+ let Inst{13-0} = Ii{13-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_11682941 : OpcodeHexagon {
- bits <19> Ii;
- let Inst{26-25} = Ii{18-17};
- let Inst{20-16} = Ii{16-12};
- let Inst{13-13} = Ii{11-11};
- let Inst{7-0} = Ii{10-3};
+class Enc_c7a204 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_16376009 : OpcodeHexagon {
+class Enc_db40cd : OpcodeHexagon {
bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{6-3} = Ii{5-2};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13249928 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{13-5} = Ii{8-0};
+class Enc_a1e29d : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> II;
+ let Inst{22-21} = II{4-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_1971351 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
+class Enc_d15d19 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13715847 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_e90a15 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{22-22} = n1{0-0};
}
-class Enc_13303422 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
+class Enc_e0a47a : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
bits <5> Rd32;
@@ -241,29 +148,32 @@ class Enc_13303422 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14574598 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_140c83 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_13094118 : OpcodeHexagon {
- bits <5> Css32;
- let Inst{20-16} = Css32{4-0};
+class Enc_7eee72 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4231995 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_d7dc10 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_844699 : OpcodeHexagon {
+class Enc_736575 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
@@ -271,74 +181,87 @@ class Enc_844699 : OpcodeHexagon {
let Inst{19-16} = Rs16{3-0};
bits <4> n1;
let Inst{28-28} = n1{3-3};
- let Inst{24-22} = n1{2-0};
+ let Inst{25-23} = n1{2-0};
}
-class Enc_8752140 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
+class Enc_8dec2e : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_eaa9f8 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <2> Qx4;
+ let Inst{1-0} = Qx4{1-0};
+}
+class Enc_509701 : OpcodeHexagon {
+ bits <19> Ii;
+ let Inst{26-25} = Ii{18-17};
+ let Inst{20-16} = Ii{16-12};
+ let Inst{13-5} = Ii{11-3};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_7978128 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{8-8} = Ii{0-0};
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
+class Enc_830e5d : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <8> II;
+ let Inst{22-16} = II{7-1};
+ let Inst{13-13} = II{0-0};
+ bits <2> Pu4;
+ let Inst{24-23} = Pu4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_10492541 : OpcodeHexagon {
+class Enc_79b8c8 : OpcodeHexagon {
bits <6> Ii;
let Inst{6-3} = Ii{5-2};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_0 : OpcodeHexagon {
-}
-class Enc_15733946 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_738356 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
+class Enc_58a8bf : OpcodeHexagon {
+ bits <3> Ii;
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14400220 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{9-5} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+class Enc_041d7b : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-23} = n1{3-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_15194851 : OpcodeHexagon {
+class Enc_f44229 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{13-13} = Ii{6-6};
+ let Inst{7-3} = Ii{5-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_14172170 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{5-5} = Ii{0-0};
+class Enc_aad80c : OpcodeHexagon {
bits <5> Vuu32;
let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
@@ -346,413 +269,269 @@ class Enc_14172170 : OpcodeHexagon {
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_10065510 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
+class Enc_87c142 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-4} = Ii{6-2};
+ bits <4> Rt16;
+ let Inst{3-0} = Rt16{3-0};
+}
+class Enc_86a14b : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{7-3} = Ii{7-3};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
+}
+class Enc_9a33d5 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14998517 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <3> n1;
- let Inst{29-29} = n1{2-2};
- let Inst{26-25} = n1{1-0};
+class Enc_a56825 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_16657398 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+class Enc_9ea4cf : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_14620934 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_ee5ed0 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+ bits <2> n1;
+ let Inst{9-8} = n1{1-0};
}
-class Enc_10075393 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_935d9b : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
-}
-class Enc_8638014 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13261538 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+class Enc_61f0b0 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_8990840 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-5} = Ii{10-2};
+class Enc_bd6011 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_5974204 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_65d691 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_4711514 : OpcodeHexagon {
- bits <2> Qu4;
- let Inst{9-8} = Qu4{1-0};
+class Enc_e8c45e : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{13-13} = Ii{6-6};
+ let Inst{7-3} = Ii{5-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_11492529 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
+class Enc_ca3887 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_9277990 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_a94f3b : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_6690615 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-4} = Ii{6-2};
+class Enc_625deb : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{10-8} = Ii{3-1};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
bits <4> Rt16;
let Inst{3-0} = Rt16{3-0};
}
-class Enc_1220199 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_7785569 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-22} = n1{4-1};
- let Inst{8-8} = n1{0-0};
+class Enc_1f5ba6 : OpcodeHexagon {
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2880796 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
+class Enc_cd82bc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{21-21} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <6> II;
+ let Inst{13-8} = II{5-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_6858527 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{4-0} = Vv32{4-0};
-}
-class Enc_11863656 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_151014 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <2> Px4;
- let Inst{6-5} = Px4{1-0};
-}
-class Enc_10333841 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_399e12 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
}
-class Enc_14044877 : OpcodeHexagon {
+class Enc_d7a65e : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-13} = Ii{5-5};
- let Inst{7-3} = Ii{4-0};
+ let Inst{12-7} = Ii{5-0};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_13691337 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <2> Qx4;
- let Inst{6-5} = Qx4{1-0};
}
-class Enc_3817033 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <3> Qt8;
- let Inst{10-8} = Qt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_607661 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3540372 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_6a5972 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> Rt16;
+ let Inst{11-8} = Rt16{3-0};
}
-class Enc_5200852 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_53dca9 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{11-8} = Ii{5-2};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_15949334 : OpcodeHexagon {
+class Enc_27fd0e : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_3831744 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_8280533 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_10969213 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_3974695 : OpcodeHexagon {
+class Enc_93af4c : OpcodeHexagon {
bits <7> Ii;
let Inst{10-4} = Ii{6-0};
bits <4> Rx16;
let Inst{3-0} = Rx16{3-0};
}
-class Enc_7255914 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_5bdd42 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-5} = Ii{6-3};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7212930 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_71f1b4 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_12781442 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
-}
-class Enc_799555 : OpcodeHexagon {
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_11083408 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_900013 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_9487067 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{19-16} = Ii{11-8};
- let Inst{12-5} = Ii{7-0};
- bits <2> Pu4;
- let Inst{22-21} = Pu4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_16014536 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_12419313 : OpcodeHexagon {
+class Enc_14640c : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{24-23} = n1{2-1};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-22} = n1{3-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_5503430 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_14767681 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_9093094 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <8> II;
- let Inst{22-16} = II{7-1};
- let Inst{13-13} = II{0-0};
- bits <2> Pu4;
- let Inst{24-23} = Pu4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_11542684 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{27-21} = Ii{15-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_8877260 : OpcodeHexagon {
+class Enc_31db33 : OpcodeHexagon {
+ bits <2> Qt4;
+ let Inst{6-5} = Qt4{1-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_1737833 : OpcodeHexagon {
+class Enc_65f095 : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-13} = Ii{5-5};
- let Inst{7-3} = Ii{4-0};
+ let Inst{6-3} = Ii{5-2};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_255516 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_784502 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_10721363 : OpcodeHexagon {
+class Enc_6413b6 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{23-23} = n1{1-1};
+ let Inst{13-13} = n1{0-0};
+}
+class Enc_7a0ea6 : OpcodeHexagon {
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+ bits <1> n1;
+ let Inst{9-9} = n1{0-0};
+}
+class Enc_84bff1 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -760,90 +539,138 @@ class Enc_10721363 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_7076358 : OpcodeHexagon {
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
+class Enc_74aef2 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_11930928 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_78e566 : OpcodeHexagon {
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_2410156 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
+class Enc_437f33 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_6735062 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
+class Enc_0527db : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rx16;
+ let Inst{3-0} = Rx16{3-0};
+}
+class Enc_420cf3 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{12-8} = Rd32{4-0};
}
-class Enc_7965855 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_e39bb2 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{9-4} = Ii{5-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_5202340 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vyy32;
- let Inst{4-0} = Vyy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_1b64fb : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_10568534 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <2> Pu4;
- let Inst{22-21} = Pu4{1-0};
+class Enc_c6220b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Ru32;
+ let Inst{12-8} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{2-0} = Nt8{2-0};
}
-class Enc_16730127 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_322e1b : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
let Inst{7-5} = Ii{2-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_11224149 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{13-13} = Ii{7-7};
- let Inst{7-3} = Ii{6-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ bits <6> II;
+ let Inst{23-23} = II{5-5};
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{12-8} = Rd32{4-0};
+}
+class Enc_989021 : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vy32;
+ let Inst{12-8} = Vy32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
+}
+class Enc_178717 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-23} = n1{4-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
+}
+class Enc_78cbf0 : OpcodeHexagon {
+ bits <18> Ii;
+ let Inst{26-25} = Ii{17-16};
+ let Inst{20-16} = Ii{15-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_9772987 : OpcodeHexagon {
+class Enc_052c7d : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_fcf7a7 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_55355c : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -854,342 +681,259 @@ class Enc_9772987 : OpcodeHexagon {
bits <5> Rtt32;
let Inst{4-0} = Rtt32{4-0};
}
-class Enc_9238139 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_2082775 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_5790679 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{12-8} = Ii{8-4};
- let Inst{4-3} = Ii{3-2};
+class Enc_211aaa : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_9305257 : OpcodeHexagon {
- bits <5> Zu8;
- let Inst{12-8} = Zu8{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_3735566 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_12654528 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
+class Enc_6185fe : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{4-0} = Vvv32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_15290236 : OpcodeHexagon {
+class Enc_cd4705 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_11139981 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_2ebe3b : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_3d5b28 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_15546666 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-8} = Ii{8-6};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_5ab2be : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_486163 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
+class Enc_fef969 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{5-5} = Ii{0-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_2079016 : OpcodeHexagon {
+class Enc_63eaeb : OpcodeHexagon {
bits <2> Ii;
let Inst{1-0} = Ii{1-0};
bits <4> Rs16;
let Inst{7-4} = Rs16{3-0};
}
-class Enc_10095813 : OpcodeHexagon {
+class Enc_95441f : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_13133322 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_9422954 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_10642833 : OpcodeHexagon {
+class Enc_372c9d : OpcodeHexagon {
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14989332 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+class Enc_4dff07 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{4-0} = Vv32{4-0};
-}
-class Enc_10263630 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13937564 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_04c959 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_7171569 : OpcodeHexagon {
+class Enc_b62ef7 : OpcodeHexagon {
bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ let Inst{10-8} = Ii{2-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2702036 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_2b518f : OpcodeHexagon {
+ bits <32> Ii;
+ let Inst{27-16} = Ii{31-20};
+ let Inst{13-0} = Ii{19-6};
}
-class Enc_1928953 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
+class Enc_b388cf : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> II;
+ let Inst{22-21} = II{4-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_5853469 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_ad1c74 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+}
+class Enc_74d4e5 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_7692963 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_c90aca : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rx32;
let Inst{4-0} = Rx32{4-0};
}
-class Enc_15140689 : OpcodeHexagon {
+class Enc_222336 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_5e87ce : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{23-22} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_f7ea77 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_748676 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-0} = Ii{8-1};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
-}
-class Enc_3372766 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_7900405 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_245865 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_11930027 : OpcodeHexagon {
- bits <12> Ii;
- let Inst{26-25} = Ii{11-10};
- let Inst{13-5} = Ii{9-1};
+class Enc_88d4d9 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{9-8} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
}
-class Enc_971574 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <6> II;
- let Inst{23-23} = II{5-5};
- let Inst{4-0} = II{4-0};
+class Enc_c0cdde : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
-}
-class Enc_13453446 : OpcodeHexagon {
- bits <24> Ii;
- let Inst{24-16} = Ii{23-15};
- let Inst{13-1} = Ii{14-2};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_6356866 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
+class Enc_226535 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_16246706 : OpcodeHexagon {
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_5326450 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_31aa6a : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_11687333 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_2771456 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_11282123 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
+class Enc_397f23 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{13-13} = Ii{7-7};
+ let Inst{7-3} = Ii{6-2};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
-}
-class Enc_518319 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{5-5} = Ii{0-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
}
-class Enc_16104442 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_7912540 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
-}
-class Enc_15560488 : OpcodeHexagon {
+class Enc_865390 : OpcodeHexagon {
bits <3> Ii;
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7581852 : OpcodeHexagon {
+class Enc_98c0b8 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
@@ -1197,144 +941,139 @@ class Enc_7581852 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_10030031 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_3915770 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_bfbf03 : OpcodeHexagon {
+ bits <2> Qs4;
+ let Inst{9-8} = Qs4{1-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_4075554 : OpcodeHexagon {
+class Enc_ecbcc8 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+}
+class Enc_f5e933 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_11326438 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_3fc427 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_4050532 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+class Enc_01d3d0 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_14461004 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
+class Enc_b0e9d8 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
let Inst{13-5} = Ii{8-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_13344657 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{8-8} = Ii{0-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_13114546 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{5-5} = Ii{0-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+class Enc_3694bd : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{23-22} = n1{1-0};
}
-class Enc_14530015 : OpcodeHexagon {
+class Enc_a42857 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-23} = n1{4-2};
- let Inst{13-13} = n1{1-1};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{24-22} = n1{3-1};
let Inst{8-8} = n1{0-0};
}
-class Enc_5967898 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
+class Enc_b7fad3 : OpcodeHexagon {
bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
+ let Inst{9-8} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_223005 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{6-3} = Ii{5-2};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_15450971 : OpcodeHexagon {
+class Enc_9e4c3f : OpcodeHexagon {
+ bits <6> II;
+ let Inst{13-8} = II{5-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <6> n1;
- let Inst{28-28} = n1{5-5};
- let Inst{25-22} = n1{4-1};
- let Inst{13-13} = n1{0-0};
+ bits <4> Rd16;
+ let Inst{19-16} = Rd16{3-0};
}
-class Enc_15536400 : OpcodeHexagon {
+class Enc_8b8d61 : OpcodeHexagon {
bits <6> Ii;
- let Inst{3-0} = Ii{5-2};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
+ let Inst{22-21} = Ii{5-4};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rd32;
+ let Inst{12-8} = Rd32{4-0};
}
-class Enc_1291652 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{8-8} = Ii{0-0};
+class Enc_88c16c : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_5636753 : OpcodeHexagon {
+class Enc_770858 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{6-5} = Ps4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_5757366 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+class Enc_bd811a : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Cd32;
+ let Inst{4-0} = Cd32{4-0};
}
-class Enc_9752128 : OpcodeHexagon {
+class Enc_b05839 : OpcodeHexagon {
bits <7> Ii;
let Inst{8-5} = Ii{6-3};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13618890 : OpcodeHexagon {
+class Enc_bc03e5 : OpcodeHexagon {
bits <17> Ii;
let Inst{26-25} = Ii{16-15};
let Inst{20-16} = Ii{14-10};
@@ -1343,33 +1082,7 @@ class Enc_13618890 : OpcodeHexagon {
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_5890213 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_5582416 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_13536408 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{3-0} = Ii{3-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
-}
-class Enc_9773189 : OpcodeHexagon {
+class Enc_412ff0 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Ru32;
@@ -1377,420 +1090,547 @@ class Enc_9773189 : OpcodeHexagon {
bits <5> Rxx32;
let Inst{12-8} = Rxx32{4-0};
}
-class Enc_2152247 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
+class Enc_c9a18e : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_be32a5 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_e6abcf : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_12848507 : OpcodeHexagon {
+class Enc_6339d5 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
+ let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ let Inst{12-8} = Ru32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_16279406 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
+class Enc_d6990d : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_1734121 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{10-8} = Ii{3-1};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{3-0} = Rt16{3-0};
+class Enc_6c9440 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_766909 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_0d8adb : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
-}
-class Enc_4527648 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_8849208 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
+class Enc_50e578 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_1cf4ca : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_48b75f : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_9894557 : OpcodeHexagon {
+class Enc_b97f71 : OpcodeHexagon {
bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <6> II;
- let Inst{23-21} = II{5-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
+ let Inst{8-5} = Ii{5-2};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_9d1247 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-5} = Ii{6-3};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4109168 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
+class Enc_f4413a : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{8-5} = Ii{3-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14560494 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_f7430e : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+}
+class Enc_e7581c : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_9773167 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_2301d6 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{8-8} = Ii{0-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_1898420 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
+class Enc_c31910 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{23-21} = Ii{7-5};
+ let Inst{13-13} = Ii{4-4};
+ let Inst{7-5} = Ii{3-1};
+ let Inst{3-3} = Ii{0-0};
+ bits <5> II;
+ let Inst{12-8} = II{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_11498120 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_2f2f04 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{5-5} = Ii{0-0};
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_15459921 : OpcodeHexagon {
- bits <3> Ii;
+class Enc_8d8a30 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
let Inst{10-8} = Ii{2-0};
bits <2> Pv4;
let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10058269 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_10197700 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_12608570 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{26-25} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-5} = Ii{9-1};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4804090 : OpcodeHexagon {
- bits <6> Ss64;
- let Inst{21-16} = Ss64{5-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14973146 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_5718302 : OpcodeHexagon {
+class Enc_2d7491 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-5} = Ii{10-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <2> Pe4;
- let Inst{6-5} = Pe4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_2103742 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
+class Enc_a803e0 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
}
-class Enc_7564330 : OpcodeHexagon {
+class Enc_45364e : OpcodeHexagon {
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_2176383 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{9-4} = Ii{5-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_b909d2 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <7> n1;
+ let Inst{28-28} = n1{6-6};
+ let Inst{25-22} = n1{5-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_7736768 : OpcodeHexagon {
+class Enc_e6c957 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_fa3ba4 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{26-25} = Ii{13-12};
+ let Inst{13-5} = Ii{11-3};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_0d8870 : OpcodeHexagon {
bits <12> Ii;
let Inst{26-25} = Ii{11-10};
let Inst{13-13} = Ii{9-9};
let Inst{7-0} = Ii{8-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_13189194 : OpcodeHexagon {
- bits <1> Ii;
- let Inst{5-5} = Ii{0-0};
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+class Enc_9fae8a : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_5154851 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_18c338 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <8> II;
+ let Inst{22-16} = II{7-1};
+ let Inst{13-13} = II{0-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_5ccba9 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_1329520 : OpcodeHexagon {
+class Enc_0ed752 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Cdd32;
let Inst{4-0} = Cdd32{4-0};
}
-class Enc_14057553 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9223889 : OpcodeHexagon {
+class Enc_143445 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
+}
+class Enc_3a3d62 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_3e3989 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-22} = n1{4-1};
+ let Inst{8-8} = n1{0-0};
+}
+class Enc_152467 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_daea09 : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{23-22} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <2> Pu4;
+ let Inst{9-8} = Pu4{1-0};
}
-class Enc_10979813 : OpcodeHexagon {
+class Enc_f37377 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+}
+class Enc_a198f6 : OpcodeHexagon {
bits <7> Ii;
- let Inst{13-13} = Ii{6-6};
- let Inst{7-3} = Ii{5-1};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{10-5} = Ii{6-1};
+ bits <2> Pt4;
+ let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_13490067 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{2-0} = Qt8{2-0};
+class Enc_3dac0b : OpcodeHexagon {
+ bits <2> Qt4;
+ let Inst{6-5} = Qt4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
-}
-class Enc_10076500 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_163381 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{26-25} = Ii{13-12};
- let Inst{13-5} = Ii{11-3};
+class Enc_e38e1f : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <2> Pu4;
+ let Inst{22-21} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_10328975 : OpcodeHexagon {
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_f8ecf9 : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Vvv32;
+ let Inst{20-16} = Vvv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_14939491 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
+class Enc_7f1a05 : OpcodeHexagon {
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ry32;
+ let Inst{12-8} = Ry32{4-0};
+}
+class Enc_2df31d : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{9-4} = Ii{7-2};
bits <4> Rd16;
let Inst{3-0} = Rd16{3-0};
}
-class Enc_8891794 : OpcodeHexagon {
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_7723767 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_25bef0 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_2639299 : OpcodeHexagon {
+class Enc_f82302 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{11-8} = Rd16{3-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <4> n1;
+ let Inst{29-29} = n1{3-3};
+ let Inst{26-25} = n1{2-1};
+ let Inst{23-23} = n1{0-0};
}
-class Enc_11552785 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_83ee64 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_11849200 : OpcodeHexagon {
+class Enc_adf111 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <2> Qx4;
+ let Inst{1-0} = Qx4{1-0};
+}
+class Enc_46c951 : OpcodeHexagon {
bits <6> Ii;
let Inst{12-7} = Ii{5-0};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
}
-class Enc_14868535 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{23-22} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-1} = Ii{8-2};
- bits <2> Pu4;
- let Inst{9-8} = Pu4{1-0};
+class Enc_5d6c34 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_4df4e9 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_48594 : OpcodeHexagon {
+class Enc_91b9fe : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6608821 : OpcodeHexagon {
- bits <4> Ii;
+class Enc_a7b8e8 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{22-21} = Ii{5-4};
let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
-}
-class Enc_11049656 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{13-13} = Ii{8-8};
- let Inst{7-3} = Ii{7-3};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+ let Inst{7-5} = Ii{2-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_2b3f60 : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <2> Px4;
+ let Inst{6-5} = Px4{1-0};
}
-class Enc_117962 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{23-21} = Ii{7-5};
- let Inst{13-13} = Ii{4-4};
- let Inst{7-5} = Ii{3-1};
- let Inst{3-3} = Ii{0-0};
- bits <5> II;
- let Inst{12-8} = II{4-0};
+class Enc_bd1cbc : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5900401 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_a30110 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_36641 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
+class Enc_f3f408 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_9626139 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
+class Enc_690862 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-13} = Ii{10-10};
+ let Inst{7-0} = Ii{9-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+}
+class Enc_2a3787 : OpcodeHexagon {
+ bits <13> Ii;
+ let Inst{26-25} = Ii{12-11};
+ let Inst{13-5} = Ii{10-2};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_11971407 : OpcodeHexagon {
+class Enc_d5c73f : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_3f97c8 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{6-3} = Ii{5-2};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_d50cd3 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_729ff7 : OpcodeHexagon {
bits <3> Ii;
let Inst{7-5} = Ii{2-0};
bits <5> Rtt32;
@@ -1800,37 +1640,32 @@ class Enc_11971407 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_9852473 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-5} = Ii{10-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_217147 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+}
+class Enc_b9c5fb : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_6495334 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
+class Enc_f394d3 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_1186018 : OpcodeHexagon {
- bits <17> Ii;
- let Inst{26-25} = Ii{16-15};
- let Inst{20-16} = Ii{14-10};
- let Inst{13-13} = Ii{9-9};
- let Inst{7-0} = Ii{8-1};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_0cb018 : OpcodeHexagon {
+ bits <5> Cs32;
+ let Inst{20-16} = Cs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15999208 : OpcodeHexagon {
+class Enc_541f26 : OpcodeHexagon {
bits <18> Ii;
let Inst{26-25} = Ii{17-16};
let Inst{20-16} = Ii{15-11};
@@ -1839,446 +1674,302 @@ class Enc_15999208 : OpcodeHexagon {
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_11477246 : OpcodeHexagon {
+class Enc_724154 : OpcodeHexagon {
bits <6> II;
let Inst{5-0} = II{5-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Re32;
let Inst{20-16} = Re32{4-0};
}
-class Enc_7971062 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{23-22} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4327792 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+class Enc_179b35 : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_10326434 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_585242 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-3} = Ii{4-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_1572239 : OpcodeHexagon {
- bits <2> Qt4;
- let Inst{6-5} = Qt4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
}
-class Enc_6372758 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+class Enc_cf1927 : OpcodeHexagon {
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_15793331 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
+class Enc_b84c4c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <6> II;
+ let Inst{23-21} = II{5-3};
+ let Inst{7-5} = II{2-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11424254 : OpcodeHexagon {
- bits <2> Qt4;
- let Inst{6-5} = Qt4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
+class Enc_9ac432 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Pu4;
+ let Inst{7-6} = Pu4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_4983213 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{10-0} = Ii{13-3};
+class Enc_8203bb : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
+ bits <8> II;
+ let Inst{13-13} = II{7-7};
+ let Inst{6-0} = II{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
}
-class Enc_16035138 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
-}
-class Enc_8225953 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{13-13} = Ii{7-7};
- let Inst{7-3} = Ii{6-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
+class Enc_e66a97 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_4397470 : OpcodeHexagon {
- bits <5> II;
- let Inst{12-8} = II{4-0};
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
}
-class Enc_1004392 : OpcodeHexagon {
+class Enc_8c2412 : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{6-5} = Ps4{1-0};
bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
+ let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
-}
-class Enc_16319737 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{26-25} = Ii{13-12};
- let Inst{13-13} = Ii{11-11};
- let Inst{7-0} = Ii{10-3};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
-}
-class Enc_2296022 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9664427 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <3> Qss8;
- let Inst{2-0} = Qss8{2-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{20-16} = Vv32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
}
-class Enc_877823 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
+class Enc_284ebb : OpcodeHexagon {
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_1589406 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+class Enc_733b27 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6900405 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+class Enc_22c845 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{10-0} = Ii{13-3};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_14150875 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-22} = n1{3-0};
-}
-class Enc_15707793 : OpcodeHexagon {
+class Enc_9b0bc1 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Gd32;
- let Inst{4-0} = Gd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_14689096 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{6-6} = Ii{0-0};
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Ru32;
- let Inst{20-16} = Ru32{4-0};
+class Enc_ea4c54 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_9915754 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_b72622 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{5-5} = Ii{0-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_7470998 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <2> Qx4;
- let Inst{1-0} = Qx4{1-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_11471622 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_569cfe : OpcodeHexagon {
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_14363183 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{23-22} = Qv4{1-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_15816255 : OpcodeHexagon {
+class Enc_96ce4f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5321335 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <4> Vdd16;
- let Inst{7-4} = Vdd16{3-0};
-}
-class Enc_12702821 : OpcodeHexagon {
+class Enc_143a3c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <6> II;
+ let Inst{23-21} = II{5-3};
+ let Inst{7-5} = II{2-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
bits <5> Rxx32;
let Inst{4-0} = Rxx32{4-0};
}
-class Enc_449439 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-5} = Ii{8-0};
+class Enc_57a33e : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-3} = Ii{7-3};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_2054304 : OpcodeHexagon {
+class Enc_311abd : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <6> Sd64;
- let Inst{5-0} = Sd64{5-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_236434 : OpcodeHexagon {
+class Enc_a1640c : OpcodeHexagon {
bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
-}
-class Enc_5598813 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_8409782 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
+class Enc_de0214 : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-5} = Ii{9-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15182416 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{20-16} = Ii{5-1};
- let Inst{8-8} = Ii{0-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_a90628 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_4501395 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_fda92c : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{26-25} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-0} = Ii{8-1};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+}
+class Enc_831a7d : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6039436 : OpcodeHexagon {
- bits <3> Qtt8;
- let Inst{2-0} = Qtt8{2-0};
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_476163 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_11a146 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_11281763 : OpcodeHexagon {
+class Enc_b15941 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9929262 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
-}
-class Enc_13174858 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8437395 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_16578332 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-8} = Ii{8-6};
- bits <5> Zdd8;
- let Inst{4-0} = Zdd8{4-0};
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_12829314 : OpcodeHexagon {
+class Enc_b78edd : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-23} = n1{2-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_9744403 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{13-9} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{8-4} = Vv32{4-0};
- bits <4> Vdd16;
- let Inst{3-0} = Vdd16{3-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10968391 : OpcodeHexagon {
+class Enc_a27588 : OpcodeHexagon {
bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <7> n1;
- let Inst{28-28} = n1{6-6};
- let Inst{25-22} = n1{5-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_64199 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-4} = Ii{6-2};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_2a7b91 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{20-16} = Ii{5-1};
+ let Inst{8-8} = Ii{0-0};
+ bits <2> Pt4;
+ let Inst{10-9} = Pt4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11039423 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
+class Enc_b43b67 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <2> Qx4;
+ let Inst{6-5} = Qx4{1-0};
}
-class Enc_6730375 : OpcodeHexagon {
+class Enc_4aca3a : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
+ bits <3> n1;
+ let Inst{29-29} = n1{2-2};
+ let Inst{26-25} = n1{1-0};
}
-class Enc_16213761 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{23-19} = Vv32{4-0};
- bits <3> Rt8;
- let Inst{18-16} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_13204995 : OpcodeHexagon {
+class Enc_b38ffc : OpcodeHexagon {
bits <4> Ii;
let Inst{11-8} = Ii{3-0};
bits <4> Rs16;
@@ -2286,79 +1977,26 @@ class Enc_13204995 : OpcodeHexagon {
bits <4> Rt16;
let Inst{3-0} = Rt16{3-0};
}
-class Enc_13338314 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_9920336 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <5> Rtt32;
- let Inst{4-0} = Rtt32{4-0};
-}
-class Enc_15380240 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_cda00a : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{19-16} = Ii{11-8};
+ let Inst{12-5} = Ii{7-0};
+ bits <2> Pu4;
+ let Inst{22-21} = Pu4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3296020 : OpcodeHexagon {
+class Enc_2fbf3c : OpcodeHexagon {
bits <3> Ii;
let Inst{10-8} = Ii{2-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_2428539 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{24-23} = n1{2-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_10039393 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9372046 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2901241 : OpcodeHexagon {
+class Enc_70b24b : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{8-5} = Ii{5-2};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
bits <5> Rdd32;
@@ -2366,424 +2004,294 @@ class Enc_2901241 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_16145290 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{6-5} = Ps4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{4-0} = Vdd32{4-0};
-}
-class Enc_13783220 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_12261611 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6135183 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rx16;
- let Inst{3-0} = Rx16{3-0};
-}
-class Enc_5523416 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_2ae154 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_13472494 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_50b5ac : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_16303398 : OpcodeHexagon {
+class Enc_2ea740 : OpcodeHexagon {
bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_3494181 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+}
+class Enc_08d755 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_13983714 : OpcodeHexagon {
+class Enc_1178da : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_931653 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-5} = Ii{6-3};
+class Enc_8dbe85 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_7622936 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_5a18b3 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
+ bits <5> n1;
+ let Inst{29-29} = n1{4-4};
+ let Inst{26-25} = n1{3-2};
+ let Inst{22-22} = n1{1-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_8773155 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
+class Enc_14d27a : OpcodeHexagon {
bits <5> II;
- let Inst{4-0} = II{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
-}
-class Enc_5401217 : OpcodeHexagon {
+ let Inst{12-8} = II{4-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <3> n1;
- let Inst{28-28} = n1{2-2};
- let Inst{24-23} = n1{1-0};
}
-class Enc_6736678 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
+class Enc_a05677 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_3457570 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{7-5} = Ii{2-0};
+class Enc_f0cca7 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <6> II;
+ let Inst{20-16} = II{5-1};
+ let Inst{13-13} = II{0-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_500cb0 : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
bits <5> Vxx32;
let Inst{4-0} = Vxx32{4-0};
}
-class Enc_3813442 : OpcodeHexagon {
+class Enc_7e5a82 : OpcodeHexagon {
bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_3135259 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+ let Inst{12-8} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5486172 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{2-0} = Nt8{2-0};
+class Enc_12b6e9 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11081334 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
+class Enc_6f70ca : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{8-4} = Ii{7-3};
}
-class Enc_9470751 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_7222b7 : OpcodeHexagon {
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_2683366 : OpcodeHexagon {
- bits <3> Quu8;
- let Inst{10-8} = Quu8{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Qdd8;
- let Inst{5-3} = Qdd8{2-0};
+class Enc_e3b0c4 : OpcodeHexagon {
}
-class Enc_15830826 : OpcodeHexagon {
- bits <14> Ii;
- let Inst{10-0} = Ii{13-3};
+class Enc_a255dc : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_4967902 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{12-7} = Ii{6-1};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
+class Enc_cb4b4e : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
-}
-class Enc_14287645 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_8324216 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
- bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_913538 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
-}
-class Enc_16311032 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_9864697 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <6> II;
- let Inst{20-16} = II{5-1};
- let Inst{13-13} = II{0-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11205051 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{11-8} = Ii{5-2};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rt16;
- let Inst{3-0} = Rt16{3-0};
-}
-class Enc_5611087 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{8-5} = Ii{6-3};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
+class Enc_9cdba7 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
}
-class Enc_10915758 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8943121 : OpcodeHexagon {
+class Enc_5cd7e9 : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-5} = Ii{9-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
}
-class Enc_1539665 : OpcodeHexagon {
- bits <5> Cs32;
- let Inst{20-16} = Cs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_454a26 : OpcodeHexagon {
+ bits <2> Pt4;
+ let Inst{9-8} = Pt4{1-0};
+ bits <2> Ps4;
+ let Inst{17-16} = Ps4{1-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8479583 : OpcodeHexagon {
+class Enc_a6853f : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{23-23} = n1{1-1};
+ bits <6> n1;
+ let Inst{29-29} = n1{5-5};
+ let Inst{26-25} = n1{4-3};
+ let Inst{23-22} = n1{2-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_313333 : OpcodeHexagon {
+class Enc_c175d0 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{11-8} = Ii{3-0};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
+}
+class Enc_895bd9 : OpcodeHexagon {
+ bits <2> Qu4;
+ let Inst{9-8} = Qu4{1-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
bits <5> Vx32;
let Inst{4-0} = Vx32{4-0};
}
-class Enc_11544269 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{13-13} = n1{0-0};
+class Enc_ea23e4 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_9018141 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Cd32;
- let Inst{4-0} = Cd32{4-0};
+class Enc_4dc228 : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{12-8} = Ii{8-4};
+ let Inst{4-3} = Ii{3-2};
+ bits <10> II;
+ let Inst{20-16} = II{9-5};
+ let Inst{7-5} = II{4-2};
+ let Inst{1-0} = II{1-0};
+}
+class Enc_10bc21 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
+}
+class Enc_1aaec1 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_6152036 : OpcodeHexagon {
+class Enc_329361 : OpcodeHexagon {
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Gdd32;
- let Inst{4-0} = Gdd32{4-0};
-}
-class Enc_1954437 : OpcodeHexagon {
- bits <6> Sss64;
- let Inst{21-16} = Sss64{5-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_3742184 : OpcodeHexagon {
+class Enc_d2c7f1 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <2> Pe4;
+ let Inst{6-5} = Pe4{1-0};
}
-class Enc_1835415 : OpcodeHexagon {
+class Enc_3680c2 : OpcodeHexagon {
bits <7> Ii;
- let Inst{10-5} = Ii{6-1};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{11-5} = Ii{6-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_1085466 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_1ef990 : OpcodeHexagon {
+ bits <2> Pv4;
+ let Inst{12-11} = Pv4{1-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13150110 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
+class Enc_e957fb : OpcodeHexagon {
+ bits <12> Ii;
+ let Inst{26-25} = Ii{11-10};
+ let Inst{13-13} = Ii{9-9};
+ let Inst{7-0} = Ii{8-1};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
}
-class Enc_6772177 : OpcodeHexagon {
- bits <5> Zu8;
- let Inst{12-8} = Zu8{4-0};
- bits <5> Zd8;
- let Inst{4-0} = Zd8{4-0};
-}
-class Enc_6616512 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
+class Enc_c9e3bc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_1886960 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_2835415 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{10-5} = Ii{7-2};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14024197 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_12297800 : OpcodeHexagon {
- bits <18> Ii;
- let Inst{26-25} = Ii{17-16};
- let Inst{20-16} = Ii{15-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
}
-class Enc_7254313 : OpcodeHexagon {
+class Enc_2e1979 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -2793,20 +2301,12 @@ class Enc_7254313 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_677558 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{10-5} = Ii{8-3};
- bits <2> Pt4;
- let Inst{12-11} = Pt4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_6223403 : OpcodeHexagon {
+class Enc_0b2e5b : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
@@ -2814,220 +2314,178 @@ class Enc_6223403 : OpcodeHexagon {
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_674613 : OpcodeHexagon {
+class Enc_d483b9 : OpcodeHexagon {
+ bits <1> Ii;
+ let Inst{5-5} = Ii{0-0};
bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_16479122 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{7-3} = Ii{7-3};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
-}
-class Enc_11704059 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_9165078 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{8-3} = Ii{8-3};
- bits <3> Rtt8;
- let Inst{2-0} = Rtt8{2-0};
+class Enc_51635c : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{8-4} = Ii{6-2};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_15376009 : OpcodeHexagon {
+class Enc_e26546 : OpcodeHexagon {
bits <5> Ii;
- let Inst{8-5} = Ii{4-1};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ let Inst{6-3} = Ii{4-1};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_8838398 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{21-21} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
- bits <6> II;
- let Inst{13-8} = II{5-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_2328527 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{20-16} = Vv32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
-}
-class Enc_1451363 : OpcodeHexagon {
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_4030179 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_70fb07 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_13770697 : OpcodeHexagon {
+class Enc_277737 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{22-21} = Ii{7-6};
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-5} = Ii{4-2};
bits <5> Ru32;
let Inst{4-0} = Ru32{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Ry32;
- let Inst{12-8} = Ry32{4-0};
-}
-class Enc_12212978 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ let Inst{12-8} = Rd32{4-0};
+}
+class Enc_5c124a : OpcodeHexagon {
+ bits <19> Ii;
+ let Inst{26-25} = Ii{18-17};
+ let Inst{20-16} = Ii{16-12};
+ let Inst{13-13} = Ii{11-11};
+ let Inst{7-0} = Ii{10-3};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_12665927 : OpcodeHexagon {
+class Enc_928ca1 : OpcodeHexagon {
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_2082956 : OpcodeHexagon {
- bits <32> Ii;
- let Inst{27-16} = Ii{31-20};
- let Inst{13-0} = Ii{19-6};
+class Enc_da664b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_7b7ba8 : OpcodeHexagon {
+ bits <2> Qu4;
+ let Inst{9-8} = Qu4{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+}
+class Enc_47ee5e : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Ru32;
+ let Inst{12-8} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{2-0} = Nt8{2-0};
}
-class Enc_220949 : OpcodeHexagon {
+class Enc_8bcba4 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
+}
+class Enc_3a2484 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-23} = n1{3-1};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-23} = n1{2-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_9939385 : OpcodeHexagon {
- bits <9> Ii;
- let Inst{12-8} = Ii{8-4};
- let Inst{4-3} = Ii{3-2};
- bits <10> II;
- let Inst{20-16} = II{9-5};
- let Inst{7-5} = II{4-2};
- let Inst{1-0} = II{1-0};
-}
-class Enc_2117024 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-8} = Ii{7-3};
- let Inst{4-2} = Ii{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8390029 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
+class Enc_a5ed8a : OpcodeHexagon {
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_10989558 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_cb9321 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{27-21} = Ii{15-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_668704 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{25-22} = n1{3-0};
}
-class Enc_5972412 : OpcodeHexagon {
+class Enc_a7341a : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Vv32;
let Inst{20-16} = Vv32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_12851489 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vss32;
- let Inst{7-3} = Vss32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_9554661 : OpcodeHexagon {
+class Enc_5eac98 : OpcodeHexagon {
bits <6> Ii;
- let Inst{12-7} = Ii{5-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_4202401 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6091631 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{9-8} = Qs4{1-0};
- bits <2> Qt4;
- let Inst{23-22} = Qt4{1-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+ let Inst{13-8} = Ii{5-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_10157519 : OpcodeHexagon {
+class Enc_02553a : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{11-5} = Ii{6-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_4835423 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{10-5} = Ii{5-0};
+class Enc_acd6ed : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{10-5} = Ii{8-3};
bits <2> Pt4;
let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14046916 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Ru32;
- let Inst{12-8} = Ru32{4-0};
- bits <5> Rt32;
- let Inst{4-0} = Rt32{4-0};
-}
-class Enc_2921694 : OpcodeHexagon {
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_8732960 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-8} = Ii{7-3};
- let Inst{4-2} = Ii{2-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5338033 : OpcodeHexagon {
+class Enc_8e583a : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
@@ -3035,355 +2493,212 @@ class Enc_5338033 : OpcodeHexagon {
let Inst{19-16} = Rs16{3-0};
bits <5> n1;
let Inst{28-28} = n1{4-4};
- let Inst{24-22} = n1{3-1};
+ let Inst{25-23} = n1{3-1};
let Inst{13-13} = n1{0-0};
}
-class Enc_6956613 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
+class Enc_b886fd : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{6-3} = Ii{4-1};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_2153798 : OpcodeHexagon {
+class Enc_24a7dc : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
-}
-class Enc_16210172 : OpcodeHexagon {
- bits <3> Qt8;
- let Inst{10-8} = Qt8{2-0};
- bits <3> Qd8;
- let Inst{5-3} = Qd8{2-0};
-}
-class Enc_5023792 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_1244745 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_10002182 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{26-25} = Ii{10-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
+class Enc_2d829e : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{10-0} = Ii{13-3};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
}
-class Enc_12492533 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_4f4ed7 : OpcodeHexagon {
+ bits <18> Ii;
+ let Inst{26-25} = Ii{17-16};
+ let Inst{20-16} = Ii{15-11};
+ let Inst{13-5} = Ii{10-2};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_1774350 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{17-16} = Ii{5-4};
- let Inst{6-3} = Ii{3-0};
+class Enc_84b2cd : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-7} = Ii{7-2};
+ bits <5> II;
+ let Inst{4-0} = II{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+}
+class Enc_8dbdfe : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{13-13} = Ii{7-7};
+ let Inst{7-3} = Ii{6-2};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <3> Nt8;
let Inst{10-8} = Nt8{2-0};
}
-class Enc_2703240 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
+class Enc_90cd8b : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_6975103 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_bd0b33 : OpcodeHexagon {
+ bits <10> Ii;
+ let Inst{21-21} = Ii{9-9};
+ let Inst{13-5} = Ii{8-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
bits <2> Pd4;
let Inst{1-0} = Pd4{1-0};
}
-class Enc_9789480 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_12244921 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
+class Enc_c7cd90 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_8674673 : OpcodeHexagon {
+class Enc_405228 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{23-22} = n1{1-0};
-}
-class Enc_8514936 : OpcodeHexagon {
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_13455308 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <3> n1;
+ let Inst{28-28} = n1{2-2};
+ let Inst{24-23} = n1{1-0};
}
-class Enc_10188026 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+class Enc_81ac1d : OpcodeHexagon {
+ bits <24> Ii;
+ let Inst{24-16} = Ii{23-15};
+ let Inst{13-1} = Ii{14-2};
}
-class Enc_3158657 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
+class Enc_395cc4 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10597934 : OpcodeHexagon {
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
- bits <2> n1;
- let Inst{9-8} = n1{1-0};
-}
-class Enc_10612292 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <2> Qx4;
- let Inst{1-0} = Qx4{1-0};
-}
-class Enc_5178985 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
bits <5> Rtt32;
let Inst{12-8} = Rtt32{4-0};
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_3967902 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
- bits <6> II;
- let Inst{13-13} = II{5-5};
- let Inst{4-0} = II{4-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_2462143 : OpcodeHexagon {
+class Enc_a51a9a : OpcodeHexagon {
bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{12-8} = Ii{7-3};
+ let Inst{4-2} = Ii{2-0};
}
-class Enc_9849208 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
+class Enc_d44e31 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{12-7} = Ii{5-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{4-0} = Rt32{4-0};
}
-class Enc_12618352 : OpcodeHexagon {
- bits <5> Rtt32;
- let Inst{20-16} = Rtt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
-}
-class Enc_7303598 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
-}
-class Enc_13823098 : OpcodeHexagon {
- bits <5> Gss32;
- let Inst{20-16} = Gss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_16388420 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{6-5} = Qs4{1-0};
+class Enc_f77fbc : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{13-13} = Ii{3-3};
+ let Inst{10-8} = Ii{2-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
-}
-class Enc_8328140 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{21-21} = Ii{15-15};
- let Inst{13-8} = Ii{14-9};
- let Inst{2-0} = Ii{8-6};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <3> Os8;
+ let Inst{2-0} = Os8{2-0};
}
-class Enc_1793896 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{13-13} = Ii{1-1};
- let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_d2216a : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_4944558 : OpcodeHexagon {
- bits <2> Qu4;
- let Inst{9-8} = Qu4{1-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{4-0} = Vx32{4-0};
+class Enc_85bf58 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{6-3} = Ii{6-3};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_13211717 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{20-16} = Vvv32{4-0};
+class Enc_71bb9b : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_8170340 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <3> Qdd8;
- let Inst{2-0} = Qdd8{2-0};
+class Enc_52a5dd : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_14071773 : OpcodeHexagon {
+class Enc_5e2823 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_8605375 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+class Enc_28a2dc : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{12-8} = Ii{4-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12711252 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{9-8} = Pv4{1-0};
+ bits <5> Rx32;
+ let Inst{4-0} = Rx32{4-0};
}
-class Enc_8202458 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_5138b3 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_8577055 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
+class Enc_84d359 : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{3-0} = Ii{3-0};
bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{25-23} = n1{3-1};
- let Inst{8-8} = n1{0-0};
+ let Inst{7-4} = Rs16{3-0};
}
-class Enc_1409050 : OpcodeHexagon {
+class Enc_e07374 : OpcodeHexagon {
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
-}
-class Enc_7466005 : OpcodeHexagon {
- bits <5> Gs32;
- let Inst{20-16} = Gs32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_2380082 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
+class Enc_323f2d : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_10067774 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_11000933 : OpcodeHexagon {
+class Enc_1a9974 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
@@ -3393,55 +2708,66 @@ class Enc_11000933 : OpcodeHexagon {
let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
let Inst{12-8} = Ru32{4-0};
- bits <3> Nt8;
- let Inst{2-0} = Nt8{2-0};
+ bits <5> Rtt32;
+ let Inst{4-0} = Rtt32{4-0};
}
-class Enc_13201267 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+class Enc_1de724 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> n1;
+ let Inst{28-28} = n1{3-3};
+ let Inst{24-22} = n1{2-0};
}
-class Enc_1989309 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vvv32;
- let Inst{4-0} = Vvv32{4-0};
+class Enc_dd766a : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vdd32;
+ let Inst{4-0} = Vdd32{4-0};
+}
+class Enc_0b51ce : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{10-8} = Ii{2-0};
+ bits <2> Qv4;
+ let Inst{12-11} = Qv4{1-0};
+ bits <5> Vs32;
+ let Inst{4-0} = Vs32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_9082775 : OpcodeHexagon {
+class Enc_b4e6cf : OpcodeHexagon {
bits <10> Ii;
let Inst{21-21} = Ii{9-9};
let Inst{13-5} = Ii{8-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
+ bits <5> Ru32;
+ let Inst{4-0} = Ru32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_8065534 : OpcodeHexagon {
- bits <4> Ii;
+class Enc_44215c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{17-16} = Ii{5-4};
let Inst{6-3} = Ii{3-0};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_4631106 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{17-16} = Ps4{1-0};
+class Enc_a21d47 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{10-5} = Ii{5-0};
bits <2> Pt4;
- let Inst{9-8} = Pt4{1-0};
- bits <2> Pu4;
- let Inst{7-6} = Pu4{1-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ let Inst{12-11} = Pt4{1-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_11065510 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{6-3} = Ii{4-1};
+class Enc_cc449f : OpcodeHexagon {
+ bits <4> Ii;
+ let Inst{6-3} = Ii{3-0};
bits <2> Pv4;
let Inst{1-0} = Pv4{1-0};
bits <5> Rt32;
@@ -3449,70 +2775,7 @@ class Enc_11065510 : OpcodeHexagon {
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_6673186 : OpcodeHexagon {
- bits <13> Ii;
- let Inst{26-25} = Ii{12-11};
- let Inst{13-13} = Ii{10-10};
- let Inst{7-0} = Ii{9-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_8498433 : OpcodeHexagon {
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_4395009 : OpcodeHexagon {
- bits <7> Ii;
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_10926598 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{12-8} = Vuu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vxx32;
- let Inst{7-3} = Vxx32{4-0};
-}
-class Enc_7606379 : OpcodeHexagon {
- bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_8131399 : OpcodeHexagon {
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_11522288 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rx32;
- let Inst{4-0} = Rx32{4-0};
-}
-class Enc_114098 : OpcodeHexagon {
+class Enc_645d54 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{5-5} = Ii{0-0};
@@ -3523,47 +2786,29 @@ class Enc_114098 : OpcodeHexagon {
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_5654851 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_667b39 : OpcodeHexagon {
+ bits <5> Css32;
+ let Inst{20-16} = Css32{4-0};
bits <5> Rdd32;
let Inst{4-0} = Rdd32{4-0};
}
-class Enc_12023037 : OpcodeHexagon {
- bits <2> Ps4;
- let Inst{6-5} = Ps4{1-0};
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_176263 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{9-4} = Ii{7-2};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_6130414 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{23-22} = Ii{15-14};
- let Inst{13-0} = Ii{13-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_631197 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
- bits <6> II;
- let Inst{23-21} = II{5-3};
- let Inst{7-5} = II{2-0};
+class Enc_927852 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+}
+class Enc_163a3c : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{4-0} = Rt32{4-0};
}
-class Enc_16214129 : OpcodeHexagon {
+class Enc_b087ac : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
@@ -3571,507 +2816,412 @@ class Enc_16214129 : OpcodeHexagon {
bits <5> Vd32;
let Inst{4-0} = Vd32{4-0};
}
-class Enc_8333157 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_4834775 : OpcodeHexagon {
- bits <6> II;
- let Inst{13-8} = II{5-0};
+class Enc_b1e1fb : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rd16;
- let Inst{19-16} = Rd16{3-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <5> n1;
+ let Inst{28-28} = n1{4-4};
+ let Inst{25-23} = n1{3-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_16601956 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
+class Enc_1f19b5 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{9-5} = Ii{4-0};
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
+}
+class Enc_b8c967 : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{12-5} = Ii{7-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_15946706 : OpcodeHexagon {
- bits <2> Ii;
- let Inst{6-5} = Ii{1-0};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
-}
-class Enc_6923828 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{13-13} = Ii{3-3};
- let Inst{10-8} = Ii{2-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
-}
-class Enc_1332717 : OpcodeHexagon {
+class Enc_fb6577 : OpcodeHexagon {
bits <2> Pu4;
- let Inst{6-5} = Pu4{1-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
+ let Inst{9-8} = Pu4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_1786883 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <6> Sdd64;
- let Inst{5-0} = Sdd64{5-0};
-}
-class Enc_14303394 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_9282127 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-7} = Ii{7-2};
- bits <8> II;
- let Inst{13-13} = II{7-7};
- let Inst{6-0} = II{6-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
-}
-class Enc_2813446 : OpcodeHexagon {
+class Enc_2bae10 : OpcodeHexagon {
bits <4> Ii;
- let Inst{6-3} = Ii{3-0};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_364753 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{23-23} = n1{0-0};
+ let Inst{10-8} = Ii{3-1};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_12477789 : OpcodeHexagon {
- bits <15> Ii;
- let Inst{21-21} = Ii{14-14};
- let Inst{13-13} = Ii{13-13};
- let Inst{11-1} = Ii{12-2};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
+class Enc_c4dc92 : OpcodeHexagon {
+ bits <2> Qv4;
+ let Inst{23-22} = Qv4{1-0};
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
}
-class Enc_44555 : OpcodeHexagon {
+class Enc_03833b : OpcodeHexagon {
+ bits <5> Rss32;
+ let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8497723 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{13-8} = Ii{5-0};
+class Enc_dbd70c : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rxx32;
- let Inst{4-0} = Rxx32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_4359901 : OpcodeHexagon {
+class Enc_f6fe0b : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <4> n1;
- let Inst{29-29} = n1{3-3};
- let Inst{26-25} = n1{2-1};
- let Inst{22-22} = n1{0-0};
+ bits <4> Rs16;
+ let Inst{19-16} = Rs16{3-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{24-22} = n1{4-2};
+ let Inst{13-13} = n1{1-1};
+ let Inst{8-8} = n1{0-0};
}
-class Enc_11271630 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
+class Enc_9e2e1c : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Ryy32;
+ let Inst{4-0} = Ryy32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_10501894 : OpcodeHexagon {
+class Enc_8df4be : OpcodeHexagon {
+ bits <17> Ii;
+ let Inst{26-25} = Ii{16-15};
+ let Inst{20-16} = Ii{14-10};
+ let Inst{13-5} = Ii{9-1};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+}
+class Enc_66bce1 : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{21-20} = Ii{10-9};
+ let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <3> Rdd8;
- let Inst{2-0} = Rdd8{2-0};
+ let Inst{19-16} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{11-8} = Rd16{3-0};
+}
+class Enc_b8309d : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{8-3} = Ii{8-3};
+ bits <3> Rtt8;
+ let Inst{2-0} = Rtt8{2-0};
}
-class Enc_9768377 : OpcodeHexagon {
+class Enc_5e8512 : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{4-0} = Vd32{4-0};
-}
-class Enc_16268019 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vvv32;
- let Inst{12-8} = Vvv32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_8814718 : OpcodeHexagon {
- bits <18> Ii;
- let Inst{26-25} = Ii{17-16};
- let Inst{20-16} = Ii{15-11};
- let Inst{13-5} = Ii{10-2};
+class Enc_4f677b : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{7-7} = Ii{0-0};
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_6212930 : OpcodeHexagon {
+class Enc_3d920a : OpcodeHexagon {
bits <6> Ii;
let Inst{8-5} = Ii{5-2};
- bits <2> Pt4;
- let Inst{10-9} = Pt4{1-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_5462762 : OpcodeHexagon {
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
+class Enc_e83554 : OpcodeHexagon {
+ bits <5> Ii;
+ let Inst{8-5} = Ii{4-1};
bits <1> Mu2;
let Inst{13-13} = Mu2{0-0};
- bits <5> Vv32;
- let Inst{12-8} = Vv32{4-0};
- bits <5> Vw32;
- let Inst{4-0} = Vw32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_6154421 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{13-13} = Ii{6-6};
- let Inst{7-3} = Ii{5-1};
+class Enc_ed48be : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{6-5} = Ii{1-0};
+ bits <3> Rdd8;
+ let Inst{2-0} = Rdd8{2-0};
+}
+class Enc_f8c1c4 : OpcodeHexagon {
bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
+ let Inst{12-11} = Pv4{1-0};
+ bits <1> Mu2;
+ let Inst{13-13} = Mu2{0-0};
+ bits <5> Vd32;
+ let Inst{4-0} = Vd32{4-0};
+ bits <5> Rx32;
+ let Inst{20-16} = Rx32{4-0};
}
-class Enc_8940892 : OpcodeHexagon {
+class Enc_1aa186 : OpcodeHexagon {
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <5> Rxx32;
+ let Inst{4-0} = Rxx32{4-0};
}
-class Enc_3531000 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{11-5} = Ii{6-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+class Enc_134437 : OpcodeHexagon {
+ bits <2> Qs4;
+ let Inst{9-8} = Qs4{1-0};
+ bits <2> Qt4;
+ let Inst{23-22} = Qt4{1-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_14311138 : OpcodeHexagon {
- bits <5> Vuu32;
- let Inst{20-16} = Vuu32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
+class Enc_97d666 : OpcodeHexagon {
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+ bits <4> Rd16;
+ let Inst{3-0} = Rd16{3-0};
}
-class Enc_2216485 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{22-21} = Ii{5-4};
- let Inst{13-13} = Ii{3-3};
- let Inst{7-5} = Ii{2-0};
+class Enc_f82eaf : OpcodeHexagon {
+ bits <8> Ii;
+ let Inst{10-5} = Ii{7-2};
+ bits <2> Pt4;
+ let Inst{12-11} = Pt4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
bits <5> Rd32;
let Inst{4-0} = Rd32{4-0};
}
-class Enc_12395768 : OpcodeHexagon {
- bits <16> Ii;
- let Inst{26-25} = Ii{15-14};
- let Inst{20-16} = Ii{13-9};
- let Inst{13-13} = Ii{8-8};
- let Inst{7-0} = Ii{7-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
-}
-class Enc_11047413 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_1256611 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rs32;
- let Inst{20-16} = Rs32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
-}
-class Enc_7884306 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{8-4} = Ii{7-3};
-}
-class Enc_11244923 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_8612939 : OpcodeHexagon {
+class Enc_69d63b : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <3> Ns8;
let Inst{18-16} = Ns8{2-0};
- bits <5> n1;
- let Inst{29-29} = n1{4-4};
- let Inst{26-25} = n1{3-2};
- let Inst{22-22} = n1{1-1};
- let Inst{13-13} = n1{0-0};
}
-class Enc_16355964 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{12-5} = Ii{7-0};
+class Enc_f79415 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
+}
+class Enc_ce6828 : OpcodeHexagon {
+ bits <14> Ii;
+ let Inst{26-25} = Ii{13-12};
+ let Inst{13-13} = Ii{11-11};
+ let Inst{7-0} = Ii{10-3};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12616482 : OpcodeHexagon {
- bits <6> II;
- let Inst{11-8} = II{5-2};
- let Inst{6-5} = II{1-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
}
-class Enc_5915771 : OpcodeHexagon {
+class Enc_800e04 : OpcodeHexagon {
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
bits <4> Rs16;
let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{24-22} = n1{3-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_14459927 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+ bits <6> n1;
+ let Inst{28-28} = n1{5-5};
+ let Inst{25-22} = n1{4-1};
+ let Inst{13-13} = n1{0-0};
}
-class Enc_7504828 : OpcodeHexagon {
- bits <10> Ii;
- let Inst{21-21} = Ii{9-9};
- let Inst{13-5} = Ii{8-0};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_ad1831 : OpcodeHexagon {
+ bits <16> Ii;
+ let Inst{26-25} = Ii{15-14};
+ let Inst{20-16} = Ii{13-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_14209223 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
+class Enc_0fa531 : OpcodeHexagon {
+ bits <15> Ii;
+ let Inst{21-21} = Ii{14-14};
+ let Inst{13-13} = Ii{13-13};
+ let Inst{11-1} = Ii{12-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_3931661 : OpcodeHexagon {
+class Enc_7eaeb6 : OpcodeHexagon {
bits <6> Ii;
- let Inst{8-5} = Ii{5-2};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ let Inst{6-3} = Ii{5-2};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
bits <5> Rx32;
let Inst{20-16} = Rx32{4-0};
}
-class Enc_13606251 : OpcodeHexagon {
+class Enc_f55a0c : OpcodeHexagon {
bits <6> Ii;
let Inst{11-8} = Ii{5-2};
bits <4> Rs16;
let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_11475992 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Rt32;
- let Inst{20-16} = Rt32{4-0};
- bits <5> Vdd32;
- let Inst{7-3} = Vdd32{4-0};
-}
-class Enc_13133231 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
+ bits <4> Rt16;
+ let Inst{3-0} = Rt16{3-0};
}
-class Enc_9959498 : OpcodeHexagon {
- bits <8> Ii;
- let Inst{22-21} = Ii{7-6};
- let Inst{13-13} = Ii{5-5};
- let Inst{7-5} = Ii{4-2};
- bits <5> Ru32;
- let Inst{4-0} = Ru32{4-0};
+class Enc_f20719 : OpcodeHexagon {
+ bits <7> Ii;
+ let Inst{12-7} = Ii{6-1};
+ bits <6> II;
+ let Inst{13-13} = II{5-5};
+ let Inst{4-0} = II{4-0};
+ bits <2> Pv4;
+ let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
- bits <5> Rd32;
- let Inst{12-8} = Rd32{4-0};
}
-class Enc_8919369 : OpcodeHexagon {
+class Enc_eafd18 : OpcodeHexagon {
+ bits <5> II;
+ let Inst{12-8} = II{4-0};
bits <11> Ii;
let Inst{21-20} = Ii{10-9};
let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <5> n1;
- let Inst{28-28} = n1{4-4};
- let Inst{24-23} = n1{3-2};
- let Inst{13-13} = n1{1-1};
- let Inst{8-8} = n1{0-0};
-}
-class Enc_2968094 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{11-5} = Ii{6-0};
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
+ bits <3> Ns8;
+ let Inst{18-16} = Ns8{2-0};
}
-class Enc_4813442 : OpcodeHexagon {
- bits <6> Ii;
- let Inst{6-3} = Ii{5-2};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_7b523d : OpcodeHexagon {
+ bits <5> Vu32;
+ let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{23-19} = Vv32{4-0};
+ bits <3> Rt8;
+ let Inst{18-16} = Rt8{2-0};
+ bits <5> Vxx32;
+ let Inst{4-0} = Vxx32{4-0};
}
-class Enc_4684887 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <4> Rs16;
- let Inst{19-16} = Rs16{3-0};
- bits <4> n1;
- let Inst{28-28} = n1{3-3};
- let Inst{25-23} = n1{2-0};
+class Enc_47ef61 : OpcodeHexagon {
+ bits <3> Ii;
+ let Inst{7-5} = Ii{2-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rd32;
+ let Inst{4-0} = Rd32{4-0};
}
-class Enc_15606259 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
+class Enc_cc857d : OpcodeHexagon {
+ bits <5> Vuu32;
+ let Inst{12-8} = Vuu32{4-0};
+ bits <5> Rt32;
+ let Inst{20-16} = Rt32{4-0};
+ bits <5> Vx32;
+ let Inst{4-0} = Vx32{4-0};
}
-class Enc_2268028 : OpcodeHexagon {
- bits <3> Qtt8;
- let Inst{10-8} = Qtt8{2-0};
- bits <3> Qdd8;
- let Inst{5-3} = Qdd8{2-0};
+class Enc_7fa7f6 : OpcodeHexagon {
+ bits <6> II;
+ let Inst{11-8} = II{5-2};
+ let Inst{6-5} = II{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
+ bits <5> Re32;
+ let Inst{20-16} = Re32{4-0};
}
-class Enc_13430430 : OpcodeHexagon {
+class Enc_0f8bab : OpcodeHexagon {
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
bits <5> Rt32;
let Inst{20-16} = Rt32{4-0};
- bits <5> Vd32;
- let Inst{7-3} = Vd32{4-0};
- bits <3> Qxx8;
- let Inst{2-0} = Qxx8{2-0};
+ bits <2> Qd4;
+ let Inst{1-0} = Qd4{1-0};
}
-class Enc_13336212 : OpcodeHexagon {
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
- bits <1> n1;
- let Inst{9-9} = n1{0-0};
+class Enc_7eb485 : OpcodeHexagon {
+ bits <2> Ii;
+ let Inst{13-13} = Ii{1-1};
+ let Inst{6-6} = Ii{0-0};
+ bits <6> II;
+ let Inst{5-0} = II{5-0};
+ bits <5> Ru32;
+ let Inst{20-16} = Ru32{4-0};
+ bits <3> Nt8;
+ let Inst{10-8} = Nt8{2-0};
}
-class Enc_15008287 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{20-16} = Vu32{4-0};
- bits <3> Rt8;
- let Inst{2-0} = Rt8{2-0};
- bits <5> Vx32;
- let Inst{7-3} = Vx32{4-0};
- bits <5> Vy32;
- let Inst{12-8} = Vy32{4-0};
+class Enc_864a5a : OpcodeHexagon {
+ bits <9> Ii;
+ let Inst{12-8} = Ii{8-4};
+ let Inst{4-3} = Ii{3-2};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
}
-class Enc_4897205 : OpcodeHexagon {
- bits <2> Qs4;
- let Inst{9-8} = Qs4{1-0};
- bits <2> Qd4;
- let Inst{1-0} = Qd4{1-0};
+class Enc_c2b48e : OpcodeHexagon {
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
+ bits <2> Pd4;
+ let Inst{1-0} = Pd4{1-0};
}
-class Enc_8038806 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{11-8} = Ii{3-0};
+class Enc_8c6530 : OpcodeHexagon {
+ bits <5> Rtt32;
+ let Inst{12-8} = Rtt32{4-0};
bits <5> Rss32;
let Inst{20-16} = Rss32{4-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_12669374 : OpcodeHexagon {
- bits <5> Vu32;
- let Inst{12-8} = Vu32{4-0};
- bits <5> Vxx32;
- let Inst{4-0} = Vxx32{4-0};
+ bits <2> Pu4;
+ let Inst{6-5} = Pu4{1-0};
+ bits <5> Rdd32;
+ let Inst{4-0} = Rdd32{4-0};
}
-class Enc_971347 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{8-5} = Ii{3-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Ryy32;
- let Inst{4-0} = Ryy32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
+class Enc_448f7f : OpcodeHexagon {
+ bits <11> Ii;
+ let Inst{26-25} = Ii{10-9};
+ let Inst{13-13} = Ii{8-8};
+ let Inst{7-0} = Ii{7-0};
+ bits <5> Rs32;
+ let Inst{20-16} = Rs32{4-0};
+ bits <5> Rt32;
+ let Inst{12-8} = Rt32{4-0};
}
-class Enc_1997594 : OpcodeHexagon {
+class Enc_da8d43 : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{13-13} = Ii{5-5};
+ let Inst{7-3} = Ii{4-0};
+ bits <2> Pv4;
+ let Inst{1-0} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Rt32;
let Inst{12-8} = Rt32{4-0};
- bits <5> Rdd32;
- let Inst{4-0} = Rdd32{4-0};
}
-class Enc_11940513 : OpcodeHexagon {
+class Enc_a6ce9c : OpcodeHexagon {
+ bits <6> Ii;
+ let Inst{3-0} = Ii{5-2};
+ bits <4> Rs16;
+ let Inst{7-4} = Rs16{3-0};
+}
+class Enc_eca7c8 : OpcodeHexagon {
bits <2> Ii;
let Inst{13-13} = Ii{1-1};
let Inst{7-7} = Ii{0-0};
- bits <2> Pv4;
- let Inst{6-5} = Pv4{1-0};
bits <5> Rs32;
let Inst{20-16} = Rs32{4-0};
bits <5> Ru32;
@@ -4079,104 +3229,13 @@ class Enc_11940513 : OpcodeHexagon {
bits <5> Rt32;
let Inst{4-0} = Rt32{4-0};
}
-class Enc_2735552 : OpcodeHexagon {
+class Enc_4b39e4 : OpcodeHexagon {
bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Pv4;
- let Inst{12-11} = Pv4{1-0};
- bits <3> Os8;
- let Inst{2-0} = Os8{2-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_16410950 : OpcodeHexagon {
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <5> Vs32;
- let Inst{7-3} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_6226085 : OpcodeHexagon {
- bits <5> Ii;
- let Inst{12-8} = Ii{4-0};
- bits <5> II;
- let Inst{22-21} = II{4-3};
- let Inst{7-5} = II{2-0};
- bits <5> Rd32;
- let Inst{4-0} = Rd32{4-0};
-}
-class Enc_14193700 : OpcodeHexagon {
- bits <6> II;
- let Inst{5-0} = II{5-0};
- bits <3> Nt8;
- let Inst{10-8} = Nt8{2-0};
- bits <5> Re32;
- let Inst{20-16} = Re32{4-0};
-}
-class Enc_15763937 : OpcodeHexagon {
- bits <11> Ii;
- let Inst{21-20} = Ii{10-9};
- let Inst{7-1} = Ii{8-2};
- bits <3> Ns8;
- let Inst{18-16} = Ns8{2-0};
- bits <6> n1;
- let Inst{29-29} = n1{5-5};
- let Inst{26-25} = n1{4-3};
- let Inst{23-22} = n1{2-1};
- let Inst{13-13} = n1{0-0};
-}
-class Enc_2492727 : OpcodeHexagon {
- bits <5> Rss32;
- let Inst{20-16} = Rss32{4-0};
- bits <5> Rt32;
- let Inst{12-8} = Rt32{4-0};
- bits <2> Pd4;
- let Inst{1-0} = Pd4{1-0};
-}
-class Enc_13425035 : OpcodeHexagon {
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <1> Mu2;
- let Inst{13-13} = Mu2{0-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_4135257 : OpcodeHexagon {
- bits <4> Ii;
- let Inst{10-8} = Ii{3-1};
- bits <4> Rs16;
- let Inst{7-4} = Rs16{3-0};
- bits <4> Rd16;
- let Inst{3-0} = Rd16{3-0};
-}
-class Enc_14631806 : OpcodeHexagon {
+ let Inst{7-5} = Ii{2-0};
bits <5> Vu32;
let Inst{12-8} = Vu32{4-0};
+ bits <5> Vv32;
+ let Inst{20-16} = Vv32{4-0};
bits <5> Vdd32;
let Inst{4-0} = Vdd32{4-0};
}
-class Enc_12397062 : OpcodeHexagon {
- bits <3> Ii;
- let Inst{10-8} = Ii{2-0};
- bits <2> Qv4;
- let Inst{12-11} = Qv4{1-0};
- bits <5> Vs32;
- let Inst{4-0} = Vs32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
-class Enc_11959851 : OpcodeHexagon {
- bits <7> Ii;
- let Inst{6-3} = Ii{6-3};
- bits <2> Pv4;
- let Inst{1-0} = Pv4{1-0};
- bits <5> Rtt32;
- let Inst{12-8} = Rtt32{4-0};
- bits <5> Rx32;
- let Inst{20-16} = Rx32{4-0};
-}
diff --git a/lib/Target/Hexagon/HexagonDepInstrInfo.td b/lib/Target/Hexagon/HexagonDepInstrInfo.td
index d910d4af2191..2dc74632e9be 100644
--- a/lib/Target/Hexagon/HexagonDepInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonDepInstrInfo.td
@@ -11,36 +11,39 @@ def A2_abs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = abs($Rs32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_absp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = abs($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000100;
+let prefersSlot3 = 1;
}
def A2_abssat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = abs($Rs32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_add : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011000;
@@ -56,145 +59,157 @@ def A2_addh_h16_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_h16_sat_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.h,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_h16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_l16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_l16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_addh_l16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.h):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addh_l16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = add($Rt32.l,$Rs32.l):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_addi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = add($Rs32,#$Ii)",
-ALU32_ADDI_tc_1_SLOT0123, TypeALU32_ADDI>, Enc_11542684, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_ADDI>, Enc_cb9321, PredNewRel, ImmRegRel {
let Inst{31-28} = 0b1011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -213,7 +228,7 @@ def A2_addp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -224,10 +239,11 @@ def A2_addpsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let isCommutable = 1;
}
@@ -235,12 +251,13 @@ def A2_addsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -249,32 +266,34 @@ def A2_addsp : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rs32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64> {
+tc_bd16579e, TypeALU64> {
let isPseudo = 1;
}
def A2_addsph : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):raw:hi",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_bd16579e, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_addspl : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = add($Rss32,$Rtt32):raw:lo",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_bd16579e, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_and : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001000;
@@ -290,7 +309,7 @@ def A2_andir : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = and($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, ImmRegRel {
let Inst{31-22} = 0b0111011000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -306,7 +325,7 @@ def A2_andp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = and($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -316,7 +335,7 @@ def A2_aslh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000000;
let hasNewValue = 1;
@@ -328,7 +347,7 @@ def A2_asrh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000001;
let hasNewValue = 1;
@@ -340,7 +359,7 @@ def A2_combine_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.h,$Rs32.h)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011100;
@@ -352,7 +371,7 @@ def A2_combine_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.h,$Rs32.l)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011101;
@@ -364,7 +383,7 @@ def A2_combine_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.l,$Rs32.h)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011110;
@@ -376,7 +395,7 @@ def A2_combine_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = combine($Rt32.l,$Rs32.l)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011111;
@@ -388,7 +407,7 @@ def A2_combineii : HInst<
(outs DoubleRegs:$Rdd32),
(ins s32_0Imm:$Ii, s8_0Imm:$II),
"$Rdd32 = combine(#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_14007201 {
+tc_548f402d, TypeALU32_2op>, Enc_18c338 {
let Inst{31-23} = 0b011111000;
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
@@ -403,7 +422,7 @@ def A2_combinew : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1997594, PredNewRel {
+tc_548f402d, TypeALU32_3op>, Enc_be32a5, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110101000;
@@ -415,87 +434,95 @@ def A2_max : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = max($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_maxp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = max($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_maxu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = maxu($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_maxup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = maxu($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_min : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = min($Rt32,$Rs32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_minp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = min($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_minu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = minu($Rt32,$Rs32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_minup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = minu($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_neg : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = neg($Rs32)",
-PSEUDO, TypeALU32_2op> {
+tc_f16d5b17, TypeALU32_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -505,7 +532,7 @@ def A2_negp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = neg($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000100;
}
@@ -513,18 +540,19 @@ def A2_negsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = neg($Rs32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_94e6ffd9, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_nop : HInst<
(outs),
(ins),
"nop",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_0 {
+tc_e2c31426, TypeALU32_2op>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b0111111100000000;
}
@@ -532,7 +560,7 @@ def A2_not : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = not($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_f16d5b17, TypeALU32_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -542,7 +570,7 @@ def A2_notp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = not($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000100;
}
@@ -550,7 +578,7 @@ def A2_or : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001001;
@@ -566,7 +594,7 @@ def A2_orir : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = or($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, ImmRegRel {
let Inst{31-22} = 0b0111011010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -582,7 +610,7 @@ def A2_orp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = or($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -592,7 +620,7 @@ def A2_paddf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011000;
@@ -608,7 +636,7 @@ def A2_paddfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011000;
@@ -625,7 +653,7 @@ def A2_paddif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if (!$Pu4) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011101001;
let isPredicated = 1;
@@ -645,7 +673,7 @@ def A2_paddifnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if (!$Pu4.new) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-23} = 0b011101001;
let isPredicated = 1;
@@ -666,7 +694,7 @@ def A2_paddit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if ($Pu4) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011101000;
let isPredicated = 1;
@@ -685,7 +713,7 @@ def A2_padditnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"if ($Pu4.new) $Rd32 = add($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, Enc_e38e1f, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-23} = 0b011101000;
let isPredicated = 1;
@@ -705,7 +733,7 @@ def A2_paddt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011000;
@@ -720,7 +748,7 @@ def A2_paddtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = add($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel, ImmRegRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011000;
@@ -736,7 +764,7 @@ def A2_pandf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001000;
@@ -750,7 +778,7 @@ def A2_pandfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001000;
@@ -765,7 +793,7 @@ def A2_pandt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001000;
@@ -778,7 +806,7 @@ def A2_pandtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = and($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001000;
@@ -792,7 +820,7 @@ def A2_porf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001001;
@@ -806,7 +834,7 @@ def A2_porfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001001;
@@ -821,7 +849,7 @@ def A2_port : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001001;
@@ -834,7 +862,7 @@ def A2_portnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = or($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001001;
@@ -848,7 +876,7 @@ def A2_psubf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011001;
@@ -862,7 +890,7 @@ def A2_psubfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011001;
@@ -877,7 +905,7 @@ def A2_psubt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111011001;
@@ -890,7 +918,7 @@ def A2_psubtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rt32, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1332717, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_9b0bc1, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111011001;
@@ -904,7 +932,7 @@ def A2_pxorf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001011;
@@ -918,7 +946,7 @@ def A2_pxorfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001011;
@@ -933,7 +961,7 @@ def A2_pxort : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111001011;
@@ -946,7 +974,7 @@ def A2_pxortnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_ea4c54, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111001011;
@@ -960,18 +988,19 @@ def A2_roundsat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = round($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_94e6ffd9, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = sat($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
@@ -982,7 +1011,7 @@ def A2_satb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -993,7 +1022,7 @@ def A2_sath : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sath($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1004,7 +1033,7 @@ def A2_satub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satub($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1015,7 +1044,7 @@ def A2_satuh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = satuh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100110;
let hasNewValue = 1;
@@ -1026,7 +1055,7 @@ def A2_sub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011, PredNewRel, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011001;
@@ -1041,145 +1070,157 @@ def A2_subh_h16_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):<<16",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_bd16579e, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_h16_sat_hh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.h,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_lh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_h16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):sat:<<16",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_l16_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_l16_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8605375 {
+tc_7ca2ea10, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def A2_subh_l16_sat_hl : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.h):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subh_l16_sat_ll : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32.l,$Rs32.l):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_subp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = sub($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -1188,7 +1229,7 @@ def A2_subri : HInst<
(outs IntRegs:$Rd32),
(ins s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = sub(#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_13472494, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_140c83, PredNewRel, ImmRegRel {
let Inst{31-22} = 0b0111011001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1204,12 +1245,13 @@ def A2_subsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1217,7 +1259,7 @@ def A2_svaddh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vaddh($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110000;
@@ -1230,12 +1272,13 @@ def A2_svaddhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vaddh($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -1244,12 +1287,13 @@ def A2_svadduhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vadduh($Rs32,$Rt32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
let isCommutable = 1;
@@ -1258,12 +1302,13 @@ def A2_svavgh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vavgh($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_511f28f6, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
let isCommutable = 1;
}
@@ -1271,12 +1316,13 @@ def A2_svavghs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vavgh($Rs32,$Rt32):rnd",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_14071773 {
+tc_76c4c5ef, TypeALU32_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111001;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
let isCommutable = 1;
}
@@ -1284,19 +1330,20 @@ def A2_svnavgh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vnavgh($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_511f28f6, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110111011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let InputType = "reg";
}
def A2_svsubh : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubh($Rt32,$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110100;
@@ -1308,12 +1355,13 @@ def A2_svsubhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubh($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110101;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1321,12 +1369,13 @@ def A2_svsubuhs : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = vsubuh($Rt32,$Rs32):sat",
-ALU32_3op_tc_2_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_b0f50e3c, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110110111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
let InputType = "reg";
}
@@ -1334,7 +1383,7 @@ def A2_swiz : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = swiz($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -1344,7 +1393,7 @@ def A2_sxtb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000101;
let hasNewValue = 1;
@@ -1356,7 +1405,7 @@ def A2_sxth : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000111;
let hasNewValue = 1;
@@ -1368,7 +1417,7 @@ def A2_sxtw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = sxtw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100010;
}
@@ -1376,7 +1425,7 @@ def A2_tfr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000011;
let hasNewValue = 1;
@@ -1389,7 +1438,7 @@ def A2_tfrcrr : HInst<
(outs IntRegs:$Rd32),
(ins CtrRegs:$Cs32),
"$Rd32 = $Cs32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_1539665 {
+tc_3b4892c6, TypeCR>, Enc_0cb018 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01101010000;
let hasNewValue = 1;
@@ -1399,7 +1448,7 @@ def A2_tfrf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let hasNewValue = 1;
@@ -1414,7 +1463,7 @@ def A2_tfrfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let hasNewValue = 1;
@@ -1430,7 +1479,7 @@ def A2_tfrih : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, u16_0Imm:$Ii),
"$Rx32.h = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_6130414 {
+tc_548f402d, TypeALU32_2op>, Enc_51436c {
let Inst{21-21} = 0b1;
let Inst{31-24} = 0b01110010;
let hasNewValue = 1;
@@ -1441,7 +1490,7 @@ def A2_tfril : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, u16_0Imm:$Ii),
"$Rx32.l = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_6130414 {
+tc_548f402d, TypeALU32_2op>, Enc_51436c {
let Inst{21-21} = 0b1;
let Inst{31-24} = 0b01110001;
let hasNewValue = 1;
@@ -1452,7 +1501,7 @@ def A2_tfrp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let BaseOpcode = "A2_tfrp";
let isPredicable = 1;
let isPseudo = 1;
@@ -1461,7 +1510,7 @@ def A2_tfrpf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if (!$Pu4) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let BaseOpcode = "A2_tfrp";
@@ -1471,7 +1520,7 @@ def A2_tfrpfnew : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if (!$Pu4.new) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedFalse = 1;
let isPredicatedNew = 1;
@@ -1482,7 +1531,7 @@ def A2_tfrpi : HInst<
(outs DoubleRegs:$Rdd32),
(ins s8_0Imm:$Ii),
"$Rdd32 = #$Ii",
-ALU64_tc_1_SLOT23, TypeALU64> {
+tc_548f402d, TypeALU64> {
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
let isMoveImm = 1;
@@ -1492,7 +1541,7 @@ def A2_tfrpt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if ($Pu4) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let BaseOpcode = "A2_tfrp";
let isPseudo = 1;
@@ -1501,7 +1550,7 @@ def A2_tfrptnew : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32),
"if ($Pu4.new) $Rdd32 = $Rss32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, PredNewRel {
let isPredicated = 1;
let isPredicatedNew = 1;
let BaseOpcode = "A2_tfrp";
@@ -1511,7 +1560,7 @@ def A2_tfrrcr : HInst<
(outs CtrRegs:$Cd32),
(ins IntRegs:$Rs32),
"$Cd32 = $Rs32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9018141 {
+tc_82f0f122, TypeCR>, Enc_bd811a {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01100010001;
let hasNewValue = 1;
@@ -1521,7 +1570,7 @@ def A2_tfrsi : HInst<
(outs IntRegs:$Rd32),
(ins s32_0Imm:$Ii),
"$Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_7971062, PredNewRel, ImmRegRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e87ce, PredNewRel, ImmRegRel {
let Inst{21-21} = 0b0;
let Inst{31-24} = 0b01111000;
let hasNewValue = 1;
@@ -1543,7 +1592,7 @@ def A2_tfrt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_1b6011fb, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1557,7 +1606,7 @@ def A2_tfrtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = $Rs32",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel, ImmRegRel {
+tc_28d296df, TypeALU32_2op>, PredNewRel, ImmRegRel {
let isPredicated = 1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -1572,41 +1621,45 @@ def A2_vabsh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
}
def A2_vabshsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsh($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vabsw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsw($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
}
def A2_vabswsat : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vabsw($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddb_map : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddb($Rss32,$Rtt32)",
-PSEUDO, TypeMAPPING> {
+tc_9c18c9a5, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -1614,7 +1667,7 @@ def A2_vaddh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1623,17 +1676,18 @@ def A2_vaddhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddh($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddub($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1642,27 +1696,29 @@ def A2_vaddubs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddub($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vadduhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vadduh($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vaddw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
@@ -1671,26 +1727,28 @@ def A2_vaddws : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vaddw($Rss32,$Rtt32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_47ab9233, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011000;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vavgh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavghcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32):crnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_63cd9d2d, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
@@ -1700,79 +1758,87 @@ def A2_vavghr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgh($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavgub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgub($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavgubr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgub($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguh($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguh($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011010;
+let prefersSlot3 = 1;
}
def A2_vavguw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavguwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavguw($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavgw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_cd321066, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vavgwcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32):crnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_63cd9d2d, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
@@ -1782,16 +1848,17 @@ def A2_vavgwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vavgw($Rss32,$Rtt32):rnd",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8333157 {
+tc_37326008, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011011;
+let prefersSlot3 = 1;
}
def A2_vcmpbeq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b110000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1800,7 +1867,7 @@ def A2_vcmpbgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b111000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1809,7 +1876,7 @@ def A2_vcmpheq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1818,7 +1885,7 @@ def A2_vcmphgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1827,7 +1894,7 @@ def A2_vcmphgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmph.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1836,7 +1903,7 @@ def A2_vcmpweq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1845,7 +1912,7 @@ def A2_vcmpwgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1854,7 +1921,7 @@ def A2_vcmpwgtu : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpw.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010000;
@@ -1863,133 +1930,147 @@ def A2_vconj : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vconj($Rss32):sat",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_94e6ffd9, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000100;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vmaxb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxb($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxub($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxuh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxuh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vmaxuw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxuw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vmaxw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vmaxw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vminb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminb($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011110;
+let prefersSlot3 = 1;
}
def A2_vminh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminub($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminuh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminuh($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminuw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminuw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vminw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vminw($Rtt32,$Rss32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011101;
+let prefersSlot3 = 1;
}
def A2_vnavgh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_cd321066, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
+let prefersSlot3 = 1;
}
def A2_vnavghcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32):crnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2000,7 +2081,7 @@ def A2_vnavghr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgh($Rtt32,$Rss32):rnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2011,16 +2092,17 @@ def A2_vnavgw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_cd321066, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
+let prefersSlot3 = 1;
}
def A2_vnavgwcr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32):crnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2031,7 +2113,7 @@ def A2_vnavgwr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vnavgw($Rtt32,$Rss32):rnd:sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_63cd9d2d, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011100;
@@ -2042,7 +2124,7 @@ def A2_vraddub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vraddub($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -2052,7 +2134,7 @@ def A2_vraddub_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vraddub($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -2063,7 +2145,7 @@ def A2_vrsadub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrsadub($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -2073,7 +2155,7 @@ def A2_vrsadub_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrsadub($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -2084,7 +2166,7 @@ def A2_vsubb_map : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vsubb($Rss32,$Rtt32)",
-PSEUDO, TypeMAPPING> {
+tc_9c18c9a5, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -2092,7 +2174,7 @@ def A2_vsubh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubh($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2101,17 +2183,18 @@ def A2_vsubhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubh($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubub($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2120,27 +2203,29 @@ def A2_vsububs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubub($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubuhs : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubuh($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_vsubw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubw($Rtt32,$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
@@ -2149,17 +2234,18 @@ def A2_vsubws : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vsubw($Rtt32,$Rss32):sat",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_11687333 {
+tc_47ab9233, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011001;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def A2_xor : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = xor($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, PredNewRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001011;
@@ -2174,7 +2260,7 @@ def A2_xorp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = xor($Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_8333157 {
+tc_9c18c9a5, TypeALU64>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2184,7 +2270,7 @@ def A2_zxtb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, PredNewRel {
+tc_548f402d, TypeALU32_2op>, PredNewRel {
let hasNewValue = 1;
let opNewValue = 0;
let BaseOpcode = "A2_zxtb";
@@ -2196,7 +2282,7 @@ def A2_zxth : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_4075554, PredNewRel {
+tc_f16d5b17, TypeALU32_2op>, Enc_5e2823, PredNewRel {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01110000110;
let hasNewValue = 1;
@@ -2208,7 +2294,7 @@ def A4_addp_c : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Px4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Px4in),
"$Rdd32 = add($Rss32,$Rtt32,$Px4):carry",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_151014 {
+tc_a87879e8, TypeS_3op>, Enc_2b3f60 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010110;
@@ -2219,7 +2305,7 @@ def A4_andn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = and($Rt32,~$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001100;
@@ -2231,7 +2317,7 @@ def A4_andnp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = and($Rtt32,~$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2240,32 +2326,34 @@ def A4_bitsplit : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = bitsplit($Rs32,$Rt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_1997594 {
+tc_7ca2ea10, TypeALU64>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010100001;
+let prefersSlot3 = 1;
}
def A4_bitspliti : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rdd32 = bitsplit($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_5654851 {
+tc_7ca2ea10, TypeS_2op>, Enc_311abd {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001000110;
+let prefersSlot3 = 1;
}
def A4_boundscheck : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rs32,$Rtt32)",
-M_tc_3x_SLOT23, TypeALU64> {
+tc_c58f771a, TypeALU64> {
let isPseudo = 1;
}
def A4_boundscheck_hi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rss32,$Rtt32):raw:hi",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -2274,7 +2362,7 @@ def A4_boundscheck_lo : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = boundscheck($Rss32,$Rtt32):raw:lo",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -2283,7 +2371,7 @@ def A4_cmpbeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.eq($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b110000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2296,7 +2384,7 @@ def A4_cmpbeqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Pd4 = cmpb.eq($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101000;
@@ -2309,7 +2397,7 @@ def A4_cmpbgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.gt($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2321,7 +2409,7 @@ def A4_cmpbgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s8_0Imm:$Ii),
"$Pd4 = cmpb.gt($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101001;
@@ -2333,7 +2421,7 @@ def A4_cmpbgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmpb.gtu($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b111000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2345,7 +2433,7 @@ def A4_cmpbgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmpb.gtu($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3531000, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_02553a, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011101010;
@@ -2362,7 +2450,7 @@ def A4_cmpheq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.eq($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2375,7 +2463,7 @@ def A4_cmpheqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmph.eq($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101000;
@@ -2393,7 +2481,7 @@ def A4_cmphgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.gt($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2405,7 +2493,7 @@ def A4_cmphgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmph.gt($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_6736678, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_08d755, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011101001;
@@ -2422,7 +2510,7 @@ def A4_cmphgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmph.gtu($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, ImmRegRel {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111110;
@@ -2434,7 +2522,7 @@ def A4_cmphgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmph.gtu($Rs32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3531000, ImmRegRel {
+tc_5fa2857c, TypeALU64>, Enc_02553a, ImmRegRel {
let Inst{4-2} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011101010;
@@ -2451,7 +2539,7 @@ def A4_combineii : HInst<
(outs DoubleRegs:$Rdd32),
(ins s8_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = combine(#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9864697 {
+tc_548f402d, TypeALU32_2op>, Enc_f0cca7 {
let Inst{31-21} = 0b01111100100;
let isExtendable = 1;
let opExtendable = 2;
@@ -2463,7 +2551,7 @@ def A4_combineir : HInst<
(outs DoubleRegs:$Rdd32),
(ins s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rdd32 = combine(#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_2462143 {
+tc_548f402d, TypeALU32_2op>, Enc_9cdba7 {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011001;
let isExtendable = 1;
@@ -2476,7 +2564,7 @@ def A4_combineri : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rdd32 = combine($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_2462143 {
+tc_548f402d, TypeALU32_2op>, Enc_9cdba7 {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011000;
let isExtendable = 1;
@@ -2489,7 +2577,7 @@ def A4_cround_ri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = cround($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2501,7 +2589,7 @@ def A4_cround_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cround($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -2513,14 +2601,14 @@ def A4_ext : HInst<
(outs),
(ins u26_6Imm:$Ii),
"immext(#$Ii)",
-EXTENDER_tc_1_SLOT0123, TypeEXTENDER>, Enc_2082956 {
+tc_9a13af9d, TypeEXTENDER>, Enc_2b518f {
let Inst{31-28} = 0b0000;
}
def A4_modwrapu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = modwrap($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2532,7 +2620,7 @@ def A4_orn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = or($Rt32,~$Rs32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8605375 {
+tc_548f402d, TypeALU32_3op>, Enc_bd6011 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110001101;
@@ -2544,7 +2632,7 @@ def A4_ornp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = or($Rtt32,~$Rss32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_11687333 {
+tc_9c18c9a5, TypeALU64>, Enc_ea23e4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010011111;
@@ -2553,7 +2641,7 @@ def A4_paslhf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000000;
@@ -2567,7 +2655,7 @@ def A4_paslhfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000000;
@@ -2582,7 +2670,7 @@ def A4_paslht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000000;
@@ -2595,7 +2683,7 @@ def A4_paslhtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = aslh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000000;
@@ -2609,7 +2697,7 @@ def A4_pasrhf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000001;
@@ -2623,7 +2711,7 @@ def A4_pasrhfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000001;
@@ -2638,7 +2726,7 @@ def A4_pasrht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000001;
@@ -2651,7 +2739,7 @@ def A4_pasrhtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = asrh($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000001;
@@ -2665,7 +2753,7 @@ def A4_psxtbf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000101;
@@ -2679,7 +2767,7 @@ def A4_psxtbfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000101;
@@ -2694,7 +2782,7 @@ def A4_psxtbt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000101;
@@ -2707,7 +2795,7 @@ def A4_psxtbtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000101;
@@ -2721,7 +2809,7 @@ def A4_psxthf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000111;
@@ -2735,7 +2823,7 @@ def A4_psxthfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000111;
@@ -2750,7 +2838,7 @@ def A4_psxtht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000111;
@@ -2763,7 +2851,7 @@ def A4_psxthtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = sxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000111;
@@ -2777,7 +2865,7 @@ def A4_pzxtbf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000100;
@@ -2791,7 +2879,7 @@ def A4_pzxtbfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000100;
@@ -2806,7 +2894,7 @@ def A4_pzxtbt : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000100;
@@ -2819,7 +2907,7 @@ def A4_pzxtbtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = zxtb($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000100;
@@ -2833,7 +2921,7 @@ def A4_pzxthf : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
let Inst{31-21} = 0b01110000110;
@@ -2847,7 +2935,7 @@ def A4_pzxthfnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1011;
let Inst{31-21} = 0b01110000110;
@@ -2862,7 +2950,7 @@ def A4_pzxtht : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_548f402d, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b01110000110;
@@ -2875,7 +2963,7 @@ def A4_pzxthtnew : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) $Rd32 = zxth($Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9422954, PredNewRel {
+tc_b08be45e, TypeALU32_2op>, Enc_fb6577, PredNewRel {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b01110000110;
@@ -2889,7 +2977,7 @@ def A4_rcmpeq : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011010;
@@ -2903,7 +2991,7 @@ def A4_rcmpeqi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_16355964, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_b8c967, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011010;
let hasNewValue = 1;
@@ -2920,7 +3008,7 @@ def A4_rcmpneq : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = !cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_14071773, ImmRegRel {
+tc_548f402d, TypeALU32_3op>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110011011;
@@ -2934,7 +3022,7 @@ def A4_rcmpneqi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = !cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_16355964, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_b8c967, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b01110011011;
let hasNewValue = 1;
@@ -2951,7 +3039,7 @@ def A4_round_ri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = round($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2963,7 +3051,7 @@ def A4_round_ri_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = round($Rs32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100111;
@@ -2976,7 +3064,7 @@ def A4_round_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = round($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -2988,7 +3076,7 @@ def A4_round_rr_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = round($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_63cd9d2d, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110110;
@@ -3001,7 +3089,7 @@ def A4_subp_c : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Px4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Px4in),
"$Rdd32 = sub($Rss32,$Rtt32,$Px4):carry",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_151014 {
+tc_a87879e8, TypeS_3op>, Enc_2b3f60 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010111;
@@ -3012,7 +3100,7 @@ def A4_tfrcpp : HInst<
(outs DoubleRegs:$Rdd32),
(ins CtrRegs64:$Css32),
"$Rdd32 = $Css32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_13094118 {
+tc_3b4892c6, TypeCR>, Enc_667b39 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01101000000;
}
@@ -3020,7 +3108,7 @@ def A4_tfrpcp : HInst<
(outs CtrRegs64:$Cdd32),
(ins DoubleRegs:$Rss32),
"$Cdd32 = $Rss32",
-CR_tc_3x_SLOT3, TypeCR>, Enc_1329520 {
+tc_82f0f122, TypeCR>, Enc_0ed752 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b01100011001;
}
@@ -3028,7 +3116,7 @@ def A4_tlbmatch : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Pd4 = tlbmatch($Rss32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2492727 {
+tc_e2c08bb4, TypeALU64>, Enc_03833b {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3038,7 +3126,7 @@ def A4_vcmpbeq_any : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = any8(vcmpb.eq($Rss32,$Rtt32))",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3047,7 +3135,7 @@ def A4_vcmpbeqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u8_0Imm:$Ii),
"$Pd4 = vcmpb.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3056,7 +3144,7 @@ def A4_vcmpbgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = vcmpb.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11010010000;
@@ -3065,7 +3153,7 @@ def A4_vcmpbgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpb.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3074,7 +3162,7 @@ def A4_vcmpbgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmpb.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3083,7 +3171,7 @@ def A4_vcmpheqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmph.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3092,7 +3180,7 @@ def A4_vcmphgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmph.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3101,7 +3189,7 @@ def A4_vcmphgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmph.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3110,7 +3198,7 @@ def A4_vcmpweqi : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpw.eq($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100000;
@@ -3119,7 +3207,7 @@ def A4_vcmpwgti : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, s8_0Imm:$Ii),
"$Pd4 = vcmpw.gt($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_13455308 {
+tc_5fa2857c, TypeALU64>, Enc_0d8adb {
let Inst{4-2} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11011100001;
@@ -3128,7 +3216,7 @@ def A4_vcmpwgtui : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u7_0Imm:$Ii),
"$Pd4 = vcmpw.gtu($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_2968094 {
+tc_5fa2857c, TypeALU64>, Enc_3680c2 {
let Inst{4-2} = 0b100;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b11011100010;
@@ -3137,7 +3225,7 @@ def A4_vrmaxh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3148,7 +3236,7 @@ def A4_vrmaxuh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxuh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3159,7 +3247,7 @@ def A4_vrmaxuw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxuw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3170,7 +3258,7 @@ def A4_vrmaxw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrmaxw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3181,7 +3269,7 @@ def A4_vrminh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3192,7 +3280,7 @@ def A4_vrminuh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminuh($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3203,7 +3291,7 @@ def A4_vrminuw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminuw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -3214,7 +3302,7 @@ def A4_vrminw : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Ru32),
"$Rxx32 = vrminw($Rss32,$Ru32)",
-S_3op_tc_3_SLOT23, TypeS_3op>, Enc_9773189 {
+tc_2aaab1e0, TypeS_3op>, Enc_412ff0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011001;
@@ -3225,7 +3313,7 @@ def A5_ACS : HInst<
(outs DoubleRegs:$Rxx32, PredRegs:$Pe4),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32,$Pe4 = vacsh($Rss32,$Rtt32)",
-M_tc_3stall_SLOT23, TypeM>, Enc_12822813, Requires<[HasV55T]> {
+tc_ae0722f7, TypeM>, Enc_831a7d, Requires<[HasV55T]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -3238,7 +3326,7 @@ def A5_vaddhubs : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vaddhub($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9277990, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_3op>, Enc_d2216a, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
@@ -3251,7 +3339,7 @@ def A6_vminub_RdP : HInst<
(outs DoubleRegs:$Rdd32, PredRegs:$Pe4),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32,$Pe4 = vminub($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_766909, Requires<[HasV62T]> {
+tc_583510c7, TypeM>, Enc_d2c7f1, Requires<[HasV62T]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -3262,7 +3350,7 @@ def C2_all8 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = all8($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011101000;
}
@@ -3270,7 +3358,7 @@ def C2_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = and($Pt4,$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011000000;
@@ -3279,7 +3367,7 @@ def C2_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = and($Pt4,!$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011011000;
@@ -3288,7 +3376,7 @@ def C2_any8 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = any8($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011100000;
}
@@ -3296,7 +3384,7 @@ def C2_bitsclr : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = bitsclr($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111100;
@@ -3305,7 +3393,7 @@ def C2_bitsclri : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u6_0Imm:$Ii),
"$Pd4 = bitsclr($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_14574598 {
+tc_5fa2857c, TypeS_2op>, Enc_5d6c34 {
let Inst{7-2} = 0b000000;
let Inst{31-21} = 0b10000101100;
}
@@ -3313,7 +3401,7 @@ def C2_bitsset : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = bitsset($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111010;
@@ -3322,7 +3410,7 @@ def C2_ccombinewf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111101000;
@@ -3334,7 +3422,7 @@ def C2_ccombinewnewf : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pu4.new) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111101000;
@@ -3347,7 +3435,7 @@ def C2_ccombinewnewt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4.new) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_28d296df, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11111101000;
@@ -3359,7 +3447,7 @@ def C2_ccombinewt : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pu4) $Rdd32 = combine($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_8202458, PredNewRel {
+tc_1b6011fb, TypeALU32_3op>, Enc_cb4b4e, PredNewRel {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11111101000;
@@ -3370,7 +3458,7 @@ def C2_cmoveif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if (!$Pu4) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111101;
@@ -3392,7 +3480,7 @@ def C2_cmoveit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if ($Pu4) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_548f402d, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111100;
@@ -3413,7 +3501,7 @@ def C2_cmovenewif : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if (!$Pu4.new) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_b08be45e, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111101;
@@ -3436,7 +3524,7 @@ def C2_cmovenewit : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii),
"if ($Pu4.new) $Rd32 = #$Ii",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9487067, PredNewRel, ImmRegRel {
+tc_b08be45e, TypeALU32_2op>, Enc_cda00a, PredNewRel, ImmRegRel {
let Inst{13-13} = 0b1;
let Inst{20-20} = 0b0;
let Inst{31-23} = 0b011111100;
@@ -3458,7 +3546,7 @@ def C2_cmpeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010000;
@@ -3471,7 +3559,7 @@ def C2_cmpeqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-22} = 0b0111010100;
let CextOpcode = "C2_cmpeq";
@@ -3487,7 +3575,7 @@ def C2_cmpeqp : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3498,7 +3586,7 @@ def C2_cmpgei : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s8_0Imm:$Ii),
"$Pd4 = cmp.ge($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_9df8b0dc, TypeALU32_2op> {
let isCompare = 1;
let isPseudo = 1;
}
@@ -3506,7 +3594,7 @@ def C2_cmpgeui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Pd4 = cmp.geu($Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op> {
+tc_9df8b0dc, TypeALU32_2op> {
let isCompare = 1;
let isPseudo = 1;
}
@@ -3514,7 +3602,7 @@ def C2_cmpgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.gt($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010010;
@@ -3526,7 +3614,7 @@ def C2_cmpgti : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = cmp.gt($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-22} = 0b0111010101;
let CextOpcode = "C2_cmpgt";
@@ -3542,7 +3630,7 @@ def C2_cmpgtp : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3552,7 +3640,7 @@ def C2_cmpgtu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.gtu($Rs32,$Rt32)",
-ALU32_3op_tc_2early_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010011;
@@ -3564,7 +3652,7 @@ def C2_cmpgtui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = cmp.gtu($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_13249928, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_c0cdde, ImmRegRel {
let Inst{4-2} = 0b000;
let Inst{31-21} = 0b01110101100;
let CextOpcode = "C2_cmpgtu";
@@ -3580,7 +3668,7 @@ def C2_cmpgtup : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = cmp.gtu($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744 {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7 {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010100;
@@ -3590,7 +3678,7 @@ def C2_cmplt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.lt($Rs32,$Rt32)",
-PSEUDO, TypeALU32_3op> {
+tc_9df8b0dc, TypeALU32_3op> {
let isCompare = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -3599,7 +3687,7 @@ def C2_cmpltu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = cmp.ltu($Rs32,$Rt32)",
-PSEUDO, TypeALU32_3op> {
+tc_9df8b0dc, TypeALU32_3op> {
let isCompare = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -3608,7 +3696,7 @@ def C2_mask : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4),
"$Rdd32 = mask($Pt4)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_10328975 {
+tc_b86c7e8b, TypeS_2op>, Enc_78e566 {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b1000011000000000;
@@ -3617,7 +3705,7 @@ def C2_mux : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mux($Pu4,$Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_9626139 {
+tc_1b6011fb, TypeALU32_3op>, Enc_ea4c54 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110100000;
@@ -3629,7 +3717,7 @@ def C2_muxii : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii, s8_0Imm:$II),
"$Rd32 = mux($Pu4,#$Ii,#$II)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_9093094 {
+tc_1b6011fb, TypeALU32_2op>, Enc_830e5d {
let Inst{31-25} = 0b0111101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -3643,7 +3731,7 @@ def C2_muxir : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = mux($Pu4,$Rs32,#$Ii)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534 {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011100110;
let hasNewValue = 1;
@@ -3659,7 +3747,7 @@ def C2_muxri : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pu4, s32_0Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = mux($Pu4,#$Ii,$Rs32)",
-ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, Enc_10568534 {
+tc_1b6011fb, TypeALU32_2op>, Enc_e38e1f {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b011100111;
let hasNewValue = 1;
@@ -3675,7 +3763,7 @@ def C2_not : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = not($Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_6975103 {
+tc_81a23d44, TypeCR>, Enc_65d691 {
let Inst{13-2} = 0b000000000000;
let Inst{31-18} = 0b01101011110000;
}
@@ -3683,7 +3771,7 @@ def C2_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = or($Pt4,$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011001000;
@@ -3692,7 +3780,7 @@ def C2_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Pt4, PredRegs:$Ps4),
"$Pd4 = or($Pt4,!$Ps4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8891794 {
+tc_d63b71d1, TypeCR>, Enc_454a26 {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011111000;
@@ -3701,7 +3789,7 @@ def C2_pxfer_map : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4),
"$Pd4 = $Ps4",
-S_2op_tc_1_SLOT23, TypeMAPPING> {
+tc_d63b71d1, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -3709,7 +3797,7 @@ def C2_tfrpr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Ps4),
"$Rd32 = $Ps4",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_11139981 {
+tc_b86c7e8b, TypeS_2op>, Enc_f5e933 {
let Inst{13-5} = 0b000000000;
let Inst{31-18} = 0b10001001010000;
let hasNewValue = 1;
@@ -3719,7 +3807,7 @@ def C2_tfrrp : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32),
"$Pd4 = $Rs32",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_4527648 {
+tc_47f0b7ad, TypeS_2op>, Enc_48b75f {
let Inst{13-2} = 0b000000000000;
let Inst{31-21} = 0b10000101010;
}
@@ -3727,18 +3815,19 @@ def C2_vitpack : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Rd32 = vitpack($Ps4,$Pt4)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_6735062 {
+tc_7ca2ea10, TypeS_2op>, Enc_527412 {
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b10001001000000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def C2_vmux : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pu4, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmux($Pu4,$Rss32,$Rtt32)",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_7606379 {
+tc_d1b5a4b6, TypeALU64>, Enc_329361 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010001000;
@@ -3747,7 +3836,7 @@ def C2_xor : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = xor($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011010000;
@@ -3756,7 +3845,7 @@ def C4_addipc : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = add(pc,#$Ii)",
-CR_tc_2_SLOT3, TypeCR>, Enc_9554661 {
+tc_1fe8323c, TypeCR>, Enc_607661 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0110101001001001;
@@ -3772,7 +3861,7 @@ def C4_and_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,and($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011000100;
@@ -3781,7 +3870,7 @@ def C4_and_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,and($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011100100;
@@ -3790,7 +3879,7 @@ def C4_and_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,or($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011001100;
@@ -3799,7 +3888,7 @@ def C4_and_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = and($Ps4,or($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011101100;
@@ -3808,7 +3897,7 @@ def C4_cmplte : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.gt($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010010;
@@ -3820,7 +3909,7 @@ def C4_cmpltei : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = !cmp.gt($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-22} = 0b0111010101;
let CextOpcode = "C4_cmplte";
@@ -3836,7 +3925,7 @@ def C4_cmplteu : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.gtu($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010011;
@@ -3848,7 +3937,7 @@ def C4_cmplteui : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Pd4 = !cmp.gtu($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_13249928, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_c0cdde, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-21} = 0b01110101100;
let CextOpcode = "C4_cmplteu";
@@ -3864,7 +3953,7 @@ def C4_cmpneq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !cmp.eq($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_10157519, ImmRegRel {
+tc_5fe9fcd0, TypeALU32_3op>, Enc_c2b48e, ImmRegRel {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110010000;
@@ -3877,7 +3966,7 @@ def C4_cmpneqi : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Pd4 = !cmp.eq($Rs32,#$Ii)",
-ALU32_2op_tc_2early_SLOT0123, TypeALU32_2op>, Enc_16014536, ImmRegRel {
+tc_9df8b0dc, TypeALU32_2op>, Enc_bd0b33, ImmRegRel {
let Inst{4-2} = 0b100;
let Inst{31-22} = 0b0111010100;
let CextOpcode = "C4_cmpneq";
@@ -3893,7 +3982,7 @@ def C4_fastcorner9 : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = fastcorner9($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b100100;
let Inst{13-10} = 0b1000;
let Inst{31-18} = 0b01101011000000;
@@ -3902,7 +3991,7 @@ def C4_fastcorner9_not : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4),
"$Pd4 = !fastcorner9($Ps4,$Pt4)",
-CR_tc_2early_SLOT23, TypeCR>, Enc_8324216 {
+tc_d63b71d1, TypeCR>, Enc_284ebb {
let Inst{7-2} = 0b100100;
let Inst{13-10} = 0b1000;
let Inst{31-18} = 0b01101011000100;
@@ -3911,7 +4000,7 @@ def C4_nbitsclr : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !bitsclr($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111101;
@@ -3920,7 +4009,7 @@ def C4_nbitsclri : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u6_0Imm:$Ii),
"$Pd4 = !bitsclr($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_14574598 {
+tc_5fa2857c, TypeS_2op>, Enc_5d6c34 {
let Inst{7-2} = 0b000000;
let Inst{31-21} = 0b10000101101;
}
@@ -3928,7 +4017,7 @@ def C4_nbitsset : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !bitsset($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111011;
@@ -3937,7 +4026,7 @@ def C4_or_and : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,and($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011010100;
@@ -3946,7 +4035,7 @@ def C4_or_andn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,and($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011110100;
@@ -3955,7 +4044,7 @@ def C4_or_or : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,or($Pt4,$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011011100;
@@ -3964,7 +4053,7 @@ def C4_or_orn : HInst<
(outs PredRegs:$Pd4),
(ins PredRegs:$Ps4, PredRegs:$Pt4, PredRegs:$Pu4),
"$Pd4 = or($Ps4,or($Pt4,!$Pu4))",
-CR_tc_2early_SLOT23, TypeCR>, Enc_4631106 {
+tc_43068634, TypeCR>, Enc_9ac432 {
let Inst{5-2} = 0b0000;
let Inst{13-10} = 0b0000;
let Inst{31-18} = 0b01101011111100;
@@ -3973,319 +4062,293 @@ def F2_conv_d2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_d2df($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_d2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_d2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2d : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2d($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2d_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2d($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2ud : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2ud($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2ud_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_df2ud($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2uw : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2uw($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2uw_chop : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2uw($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000101;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2w : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2w($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_df2w_chop : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_df2w($Rss32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2d : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2d($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2d_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2d($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2ud : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2ud($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2ud_chop : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_sf2ud($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2uw : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2uw($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2uw_chop : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2uw($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001011011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2w : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2w($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_sf2w_chop : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_sf2w($Rs32):chop",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001011100;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_ud2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = convert_ud2df($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_13133231, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_b9c5fb, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000000111;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_ud2sf : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = convert_ud2sf($Rss32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10001000001;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_uw2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_uw2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_uw2sf : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_uw2sf($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011001;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_w2df : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = convert_w2df($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_3a3d62, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100100;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_conv_w2sf : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = convert_w2sf($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def F2_dfclass : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Pd4 = dfclass($Rss32,#$Ii)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_14400220, Requires<[HasV5T]> {
+tc_5fa2857c, TypeALU64>, Enc_1f19b5, Requires<[HasV5T]> {
let Inst{4-2} = 0b100;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b11011100100;
@@ -4296,7 +4359,7 @@ def F2_dfcmpeq : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.eq($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4308,7 +4371,7 @@ def F2_dfcmpge : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.ge($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4320,7 +4383,7 @@ def F2_dfcmpgt : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.gt($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4332,7 +4395,7 @@ def F2_dfcmpuo : HInst<
(outs PredRegs:$Pd4),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Pd4 = dfcmp.uo($Rss32,$Rtt32)",
-ALU64_tc_2early_SLOT23, TypeALU64>, Enc_3831744, Requires<[HasV5T]> {
+tc_c58f771a, TypeALU64>, Enc_fcf7a7, Requires<[HasV5T]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010010111;
@@ -4344,7 +4407,7 @@ def F2_dfimm_n : HInst<
(outs DoubleRegs:$Rdd32),
(ins u10_0Imm:$Ii),
"$Rdd32 = dfmake(#$Ii):neg",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_2702036, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_e6c957, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101100101;
let prefersSlot3 = 1;
@@ -4353,7 +4416,7 @@ def F2_dfimm_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins u10_0Imm:$Ii),
"$Rdd32 = dfmake(#$Ii):pos",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_2702036, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_e6c957, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101100100;
let prefersSlot3 = 1;
@@ -4362,14 +4425,13 @@ def F2_sfadd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfadd($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let isCommutable = 1;
}
@@ -4377,7 +4439,7 @@ def F2_sfclass : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = sfclass($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742, Requires<[HasV5T]> {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101111;
@@ -4388,7 +4450,7 @@ def F2_sfcmpeq : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.eq($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4400,7 +4462,7 @@ def F2_sfcmpge : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.ge($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4412,7 +4474,7 @@ def F2_sfcmpgt : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.gt($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4424,7 +4486,7 @@ def F2_sfcmpuo : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = sfcmp.uo($Rs32,$Rt32)",
-ALU64_tc_2early_SLOT23, TypeS_3op>, Enc_10157519, Requires<[HasV5T]> {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e, Requires<[HasV5T]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111111;
@@ -4436,52 +4498,48 @@ def F2_sffixupd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sffixupd($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011110;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffixupn : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sffixupn($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011110;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffixupr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = sffixupr($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_4075554, Requires<[HasV5T]> {
+tc_e836c161, TypeS_2op>, Enc_5e2823, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001011101;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
}
def F2_sffma : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4489,14 +4547,13 @@ def F2_sffma_lib : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += sfmpy($Rs32,$Rt32):lib",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4504,14 +4561,13 @@ def F2_sffma_sc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32, PredRegs:$Pu4),
"$Rx32 += sfmpy($Rs32,$Rt32,$Pu4):scale",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_15194851, Requires<[HasV5T]> {
+tc_2e55aa16, TypeM>, Enc_437f33, Requires<[HasV5T]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4519,14 +4575,13 @@ def F2_sffms : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4534,14 +4589,13 @@ def F2_sffms_lib : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= sfmpy($Rs32,$Rt32):lib",
-M_tc_3or4x_acc_SLOT23, TypeM>, Enc_9223889, Requires<[HasV5T]> {
+tc_2d1e6f5c, TypeM>, Enc_2ae154, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let Constraints = "$Rx32 = $Rx32in";
}
@@ -4549,7 +4603,7 @@ def F2_sfimm_n : HInst<
(outs IntRegs:$Rd32),
(ins u10_0Imm:$Ii),
"$Rd32 = sfmake(#$Ii):neg",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9082775, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_6c9440, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101011001;
let hasNewValue = 1;
@@ -4560,7 +4614,7 @@ def F2_sfimm_p : HInst<
(outs IntRegs:$Rd32),
(ins u10_0Imm:$Ii),
"$Rd32 = sfmake(#$Ii):pos",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9082775, Requires<[HasV5T]> {
+tc_485bb57c, TypeALU64>, Enc_6c9440, Requires<[HasV5T]> {
let Inst{20-16} = 0b00000;
let Inst{31-22} = 0b1101011000;
let hasNewValue = 1;
@@ -4571,20 +4625,19 @@ def F2_sfinvsqrta : HInst<
(outs IntRegs:$Rd32, PredRegs:$Pe4),
(ins IntRegs:$Rs32),
"$Rd32,$Pe4 = sfinvsqrta($Rs32)",
-S_2op_tc_3or4x_SLOT23, TypeS_2op>, Enc_5718302, Requires<[HasV5T]> {
+tc_f1aa2cdb, TypeS_2op>, Enc_890909, Requires<[HasV5T]> {
let Inst{13-7} = 0b0000000;
let Inst{31-21} = 0b10001011111;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
let isPredicateLate = 1;
-let prefersSlot3 = 1;
}
def F2_sfmax : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmax($Rs32,$Rt32)",
-M_tc_2_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_f1240c08, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011100;
@@ -4598,7 +4651,7 @@ def F2_sfmin : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmin($Rs32,$Rt32)",
-M_tc_2_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_f1240c08, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011100;
@@ -4612,14 +4665,13 @@ def F2_sfmpy : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfmpy($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011010;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
let isCommutable = 1;
}
@@ -4627,7 +4679,7 @@ def F2_sfrecipa : HInst<
(outs IntRegs:$Rd32, PredRegs:$Pe4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32,$Pe4 = sfrecipa($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_5853469, Requires<[HasV5T]> {
+tc_09c86199, TypeM>, Enc_a94f3b, Requires<[HasV5T]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011111;
@@ -4635,27 +4687,25 @@ let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
let isPredicateLate = 1;
-let prefersSlot3 = 1;
}
def F2_sfsub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = sfsub($Rs32,$Rt32)",
-M_tc_3or4x_SLOT23, TypeM>, Enc_14071773, Requires<[HasV5T]> {
+tc_3bea1824, TypeM>, Enc_5ab2be, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101011000;
let hasNewValue = 1;
let opNewValue = 0;
let isFP = 1;
-let prefersSlot3 = 1;
let Uses = [USR];
}
def J2_call : HInst<
(outs),
(ins a30_2Imm:$Ii),
"call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_13453446, PredRel {
+tc_639d93ee, TypeJ>, Enc_81ac1d, PredRel {
let Inst{0-0} = 0b0;
let Inst{31-25} = 0b0101101;
let isCall = 1;
@@ -4675,7 +4725,7 @@ def J2_callf : HInst<
(outs),
(ins PredRegs:$Pu4, a30_2Imm:$Ii),
"if (!$Pu4) call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_14868535, PredRel {
+tc_0767081f, TypeJ>, Enc_daea09, PredRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b1;
@@ -4699,7 +4749,7 @@ def J2_callr : HInst<
(outs),
(ins IntRegs:$Rs32),
"callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_ecfaae86, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010000101;
let cofMax1 = 1;
@@ -4713,7 +4763,7 @@ def J2_callrf : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953 {
+tc_84630363, TypeJ>, Enc_88d4d9 {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010001001;
@@ -4731,7 +4781,7 @@ def J2_callrt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) callr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953 {
+tc_84630363, TypeJ>, Enc_88d4d9 {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010001000;
@@ -4748,7 +4798,7 @@ def J2_callt : HInst<
(outs),
(ins PredRegs:$Pu4, a30_2Imm:$Ii),
"if ($Pu4) call $Ii",
-J_tc_2early_SLOT23, TypeJ>, Enc_14868535, PredRel {
+tc_0767081f, TypeJ>, Enc_daea09, PredRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b0;
@@ -4771,16 +4821,18 @@ def J2_endloop0 : HInst<
(outs),
(ins),
"endloop0",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC0, SA0];
let Defs = [LC0, P3, PC, USR];
+let isBranch = 1;
+let isTerminator = 1;
let isPseudo = 1;
}
def J2_endloop01 : HInst<
(outs),
(ins),
"endloop01",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC0, LC1, SA0, SA1];
let Defs = [LC0, LC1, P3, PC, USR];
let isPseudo = 1;
@@ -4789,16 +4841,18 @@ def J2_endloop1 : HInst<
(outs),
(ins),
"endloop1",
-PSEUDO, TypeJ> {
+tc_aad55963, TypeJ> {
let Uses = [LC1, SA1];
let Defs = [LC1, PC];
+let isBranch = 1;
+let isTerminator = 1;
let isPseudo = 1;
}
def J2_jump : HInst<
(outs),
(ins b30_2Imm:$Ii),
"jump $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_13453446, PredNewRel {
+tc_a333d2a9, TypeJ>, Enc_81ac1d, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{31-25} = 0b0101100;
let isTerminator = 1;
@@ -4818,7 +4872,7 @@ def J2_jumpf : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_1b834fe7, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b1;
@@ -4841,7 +4895,7 @@ def J2_jumpf_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, b15_2Imm:$Ii),
"if (!$Pu4) jump $Ii",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_1b834fe7, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -4849,7 +4903,7 @@ def J2_jumpfnew : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4.new) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b010;
let Inst{21-21} = 0b1;
@@ -4873,7 +4927,7 @@ def J2_jumpfnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4.new) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b110;
let Inst{21-21} = 0b1;
@@ -4897,7 +4951,7 @@ def J2_jumpfpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if (!$Pu4) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, Requires<[HasV60T]>, PredNewRel {
+tc_b5bfaa60, TypeJ>, Enc_daea09, Requires<[HasV60T]>, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b100;
let Inst{21-21} = 0b1;
@@ -4920,7 +4974,7 @@ def J2_jumpr : HInst<
(outs),
(ins IntRegs:$Rs32),
"jumpr $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059, PredNewRel {
+tc_b08b653e, TypeJ>, Enc_ecbcc8, PredNewRel {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010010100;
let isTerminator = 1;
@@ -4937,7 +4991,7 @@ def J2_jumprf : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_07ac815d, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010011011;
@@ -4956,7 +5010,7 @@ def J2_jumprf_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr $Rs32",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_07ac815d, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -4964,7 +5018,7 @@ def J2_jumprfnew : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0010;
let Inst{31-21} = 0b01010011011;
@@ -4984,7 +5038,7 @@ def J2_jumprfnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4.new) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0110;
let Inst{31-21} = 0b01010011011;
@@ -5004,7 +5058,7 @@ def J2_jumprfpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if (!$Pu4) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, Requires<[HasV60T]>, PredNewRel {
+tc_a1fb80e1, TypeJ>, Enc_88d4d9, Requires<[HasV60T]>, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0100;
let Inst{31-21} = 0b01010011011;
@@ -5023,7 +5077,7 @@ def J2_jumprgtez : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32>=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000101;
@@ -5038,7 +5092,7 @@ def J2_jumprgtezpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32>=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000101;
@@ -5053,7 +5107,7 @@ def J2_jumprltez : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32<=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000111;
@@ -5068,7 +5122,7 @@ def J2_jumprltezpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32<=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000111;
@@ -5083,7 +5137,7 @@ def J2_jumprnz : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32==#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000110;
@@ -5098,7 +5152,7 @@ def J2_jumprnzpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32==#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000110;
@@ -5113,7 +5167,7 @@ def J2_jumprt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_07ac815d, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b01010011010;
@@ -5131,7 +5185,7 @@ def J2_jumprt_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr $Rs32",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_07ac815d, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -5139,7 +5193,7 @@ def J2_jumprtnew : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) jumpr:nt $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0010;
let Inst{31-21} = 0b01010011010;
@@ -5158,7 +5212,7 @@ def J2_jumprtnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4.new) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, PredNewRel {
+tc_1f9668cc, TypeJ>, Enc_88d4d9, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0110;
let Inst{31-21} = 0b01010011010;
@@ -5177,7 +5231,7 @@ def J2_jumprtpt : HInst<
(outs),
(ins PredRegs:$Pu4, IntRegs:$Rs32),
"if ($Pu4) jumpr:t $Rs32",
-J_tc_2early_SLOT2, TypeJ>, Enc_1928953, Requires<[HasV60T]>, PredNewRel {
+tc_a1fb80e1, TypeJ>, Enc_88d4d9, Requires<[HasV60T]>, PredNewRel {
let Inst{7-0} = 0b00000000;
let Inst{13-10} = 0b0100;
let Inst{31-21} = 0b01010011010;
@@ -5195,7 +5249,7 @@ def J2_jumprz : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32!=#0) jump:nt $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b0;
let Inst{31-22} = 0b0110000100;
@@ -5210,7 +5264,7 @@ def J2_jumprzpt : HInst<
(outs),
(ins IntRegs:$Rs32, b13_2Imm:$Ii),
"if ($Rs32!=#0) jump:t $Ii",
-CR_tc_2early_SLOT3, TypeCR>, Enc_12477789 {
+tc_b324366f, TypeCR>, Enc_0fa531 {
let Inst{0-0} = 0b0;
let Inst{12-12} = 0b1;
let Inst{31-22} = 0b0110000100;
@@ -5225,7 +5279,7 @@ def J2_jumpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_1b834fe7, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b000;
let Inst{21-21} = 0b0;
@@ -5247,7 +5301,7 @@ def J2_jumpt_nopred_map : HInst<
(outs),
(ins PredRegs:$Pu4, b15_2Imm:$Ii),
"if ($Pu4) jump $Ii",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_1b834fe7, TypeMAPPING>, Requires<[HasV60T]> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -5255,7 +5309,7 @@ def J2_jumptnew : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4.new) jump:nt $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b010;
let Inst{21-21} = 0b0;
@@ -5278,7 +5332,7 @@ def J2_jumptnewpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4.new) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, PredNewRel {
+tc_537e2013, TypeJ>, Enc_daea09, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b110;
let Inst{21-21} = 0b0;
@@ -5301,7 +5355,7 @@ def J2_jumptpt : HInst<
(outs),
(ins PredRegs:$Pu4, b30_2Imm:$Ii),
"if ($Pu4) jump:t $Ii",
-J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT, TypeJ>, Enc_14868535, Requires<[HasV60T]>, PredNewRel {
+tc_b5bfaa60, TypeJ>, Enc_daea09, Requires<[HasV60T]>, PredNewRel {
let Inst{0-0} = 0b0;
let Inst{12-10} = 0b100;
let Inst{21-21} = 0b0;
@@ -5323,7 +5377,7 @@ def J2_loop0i : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"loop0($Ii,#$II)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9939385 {
+tc_1000eb10, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001000;
@@ -5338,7 +5392,7 @@ def J2_loop0r : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"loop0($Ii,$Rs32)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_5790679 {
+tc_f055fbb6, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5354,7 +5408,7 @@ def J2_loop1i : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"loop1($Ii,#$II)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_9939385 {
+tc_1000eb10, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001001;
@@ -5369,7 +5423,7 @@ def J2_loop1r : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"loop1($Ii,$Rs32)",
-CR_tc_3x_SLOT3, TypeCR>, Enc_5790679 {
+tc_f055fbb6, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5385,7 +5439,7 @@ def J2_pause : HInst<
(outs),
(ins u8_0Imm:$Ii),
"pause(#$Ii)",
-J_tc_2early_SLOT2, TypeJ>, Enc_8732960 {
+tc_b189ad4c, TypeJ>, Enc_a51a9a {
let Inst{1-0} = 0b00;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5396,7 +5450,7 @@ def J2_ploop1si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp1loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001101;
@@ -5412,7 +5466,7 @@ def J2_ploop1sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp1loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5429,7 +5483,7 @@ def J2_ploop2si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp2loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001110;
@@ -5445,7 +5499,7 @@ def J2_ploop2sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp2loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5462,7 +5516,7 @@ def J2_ploop3si : HInst<
(outs),
(ins b30_2Imm:$Ii, u10_0Imm:$II),
"p3 = sp3loop0($Ii,#$II)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_9939385 {
+tc_feb4974b, TypeCR>, Enc_4dc228 {
let Inst{2-2} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01101001111;
@@ -5478,7 +5532,7 @@ def J2_ploop3sr : HInst<
(outs),
(ins b30_2Imm:$Ii, IntRegs:$Rs32),
"p3 = sp3loop0($Ii,$Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_5790679 {
+tc_d6a805a8, TypeCR>, Enc_864a5a {
let Inst{2-0} = 0b000;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5495,7 +5549,7 @@ def J2_trap0 : HInst<
(outs),
(ins u8_0Imm:$Ii),
"trap0(#$Ii)",
-J_tc_2early_SLOT2, TypeJ>, Enc_8732960 {
+tc_cbe45117, TypeJ>, Enc_a51a9a {
let Inst{1-0} = 0b00;
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
@@ -5506,7 +5560,7 @@ def J4_cmpeq_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5531,7 +5585,7 @@ def J4_cmpeq_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5556,7 +5610,7 @@ def J4_cmpeq_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010001;
@@ -5579,7 +5633,7 @@ def J4_cmpeq_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010001;
@@ -5602,7 +5656,7 @@ def J4_cmpeq_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010001;
@@ -5625,7 +5679,7 @@ def J4_cmpeq_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010001;
@@ -5648,7 +5702,7 @@ def J4_cmpeq_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5672,7 +5726,7 @@ def J4_cmpeq_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5696,7 +5750,7 @@ def J4_cmpeq_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010000;
@@ -5718,7 +5772,7 @@ def J4_cmpeq_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010000;
@@ -5740,7 +5794,7 @@ def J4_cmpeq_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010000;
@@ -5762,7 +5816,7 @@ def J4_cmpeq_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010000;
@@ -5784,7 +5838,7 @@ def J4_cmpeqi_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5809,7 +5863,7 @@ def J4_cmpeqi_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5834,7 +5888,7 @@ def J4_cmpeqi_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000001;
@@ -5857,7 +5911,7 @@ def J4_cmpeqi_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000001;
@@ -5880,7 +5934,7 @@ def J4_cmpeqi_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001001;
@@ -5903,7 +5957,7 @@ def J4_cmpeqi_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001001;
@@ -5926,7 +5980,7 @@ def J4_cmpeqi_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -5950,7 +6004,7 @@ def J4_cmpeqi_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -5974,7 +6028,7 @@ def J4_cmpeqi_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000000;
@@ -5996,7 +6050,7 @@ def J4_cmpeqi_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000000;
@@ -6018,7 +6072,7 @@ def J4_cmpeqi_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001000;
@@ -6040,7 +6094,7 @@ def J4_cmpeqi_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001000;
@@ -6062,7 +6116,7 @@ def J4_cmpeqn1_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4359901, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_e90a15, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6087,7 +6141,7 @@ def J4_cmpeqn1_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.eq($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8612939, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_5a18b3, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6112,7 +6166,7 @@ def J4_cmpeqn1_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_844699, PredRel {
+tc_d108a090, TypeCJ>, Enc_1de724, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001000111;
@@ -6135,7 +6189,7 @@ def J4_cmpeqn1_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5338033, PredRel {
+tc_d108a090, TypeCJ>, Enc_14640c, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001000111;
@@ -6158,7 +6212,7 @@ def J4_cmpeqn1_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14150875, PredRel {
+tc_d108a090, TypeCJ>, Enc_668704, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001001111;
@@ -6181,7 +6235,7 @@ def J4_cmpeqn1_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_15450971, PredRel {
+tc_d108a090, TypeCJ>, Enc_800e04, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001001111;
@@ -6204,7 +6258,7 @@ def J4_cmpeqn1_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_14998517, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_4aca3a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6228,7 +6282,7 @@ def J4_cmpeqn1_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.eq($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_11544269, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_f7ea77, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6252,7 +6306,7 @@ def J4_cmpeqn1_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5401217, PredRel {
+tc_d108a090, TypeCJ>, Enc_405228, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001000110;
@@ -6274,7 +6328,7 @@ def J4_cmpeqn1_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.eq($Rs16,#$n1); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12419313, PredRel {
+tc_d108a090, TypeCJ>, Enc_3a2484, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001000110;
@@ -6296,7 +6350,7 @@ def J4_cmpeqn1_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_4684887, PredRel {
+tc_d108a090, TypeCJ>, Enc_736575, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{31-22} = 0b0001001110;
@@ -6318,7 +6372,7 @@ def J4_cmpeqn1_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.eq($Rs16,#$n1); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_220949, PredRel {
+tc_d108a090, TypeCJ>, Enc_8e583a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{31-22} = 0b0001001110;
@@ -6340,7 +6394,7 @@ def J4_cmpgt_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6365,7 +6419,7 @@ def J4_cmpgt_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6390,7 +6444,7 @@ def J4_cmpgt_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010011;
@@ -6413,7 +6467,7 @@ def J4_cmpgt_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010011;
@@ -6436,7 +6490,7 @@ def J4_cmpgt_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010011;
@@ -6459,7 +6513,7 @@ def J4_cmpgt_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010011;
@@ -6482,7 +6536,7 @@ def J4_cmpgt_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6506,7 +6560,7 @@ def J4_cmpgt_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6530,7 +6584,7 @@ def J4_cmpgt_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010010;
@@ -6552,7 +6606,7 @@ def J4_cmpgt_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010010;
@@ -6574,7 +6628,7 @@ def J4_cmpgt_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010010;
@@ -6596,7 +6650,7 @@ def J4_cmpgt_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010010;
@@ -6618,7 +6672,7 @@ def J4_cmpgti_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6643,7 +6697,7 @@ def J4_cmpgti_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6668,7 +6722,7 @@ def J4_cmpgti_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000011;
@@ -6691,7 +6745,7 @@ def J4_cmpgti_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000011;
@@ -6714,7 +6768,7 @@ def J4_cmpgti_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001011;
@@ -6737,7 +6791,7 @@ def J4_cmpgti_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001011;
@@ -6760,7 +6814,7 @@ def J4_cmpgti_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -6784,7 +6838,7 @@ def J4_cmpgti_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -6808,7 +6862,7 @@ def J4_cmpgti_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000010;
@@ -6830,7 +6884,7 @@ def J4_cmpgti_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000010;
@@ -6852,7 +6906,7 @@ def J4_cmpgti_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001010;
@@ -6874,7 +6928,7 @@ def J4_cmpgti_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001010;
@@ -6896,7 +6950,7 @@ def J4_cmpgtn1_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8674673, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_3694bd, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -6921,7 +6975,7 @@ def J4_cmpgtn1_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (!cmp.gt($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15763937, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_a6853f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -6946,7 +7000,7 @@ def J4_cmpgtn1_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_5915771, PredRel {
+tc_d108a090, TypeCJ>, Enc_a42857, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001000111;
@@ -6969,7 +7023,7 @@ def J4_cmpgtn1_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7315939, PredRel {
+tc_d108a090, TypeCJ>, Enc_f6fe0b, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001000111;
@@ -6992,7 +7046,7 @@ def J4_cmpgtn1_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7785569, PredRel {
+tc_d108a090, TypeCJ>, Enc_3e3989, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001001111;
@@ -7015,7 +7069,7 @@ def J4_cmpgtn1_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_10968391, PredRel {
+tc_d108a090, TypeCJ>, Enc_b909d2, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001001111;
@@ -7038,7 +7092,7 @@ def J4_cmpgtn1_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$n1)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_364753, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_f82302, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -7062,7 +7116,7 @@ def J4_cmpgtn1_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, n1Const:$n1, b30_2Imm:$Ii),
"if (cmp.gt($Ns8.new,#$n1)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_8479583, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_6413b6, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -7086,7 +7140,7 @@ def J4_cmpgtn1_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_2428539, PredRel {
+tc_d108a090, TypeCJ>, Enc_b78edd, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001000110;
@@ -7108,7 +7162,7 @@ def J4_cmpgtn1_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p0 = cmp.gt($Rs16,#$n1); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_8919369, PredRel {
+tc_d108a090, TypeCJ>, Enc_041d7b, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001000110;
@@ -7130,7 +7184,7 @@ def J4_cmpgtn1_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_8577055, PredRel {
+tc_d108a090, TypeCJ>, Enc_b1e1fb, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000001;
let Inst{31-22} = 0b0001001110;
@@ -7152,7 +7206,7 @@ def J4_cmpgtn1_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, n1Const:$n1, b30_2Imm:$Ii),
"p1 = cmp.gt($Rs16,#$n1); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14530015, PredRel {
+tc_d108a090, TypeCJ>, Enc_178717, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100001;
let Inst{31-22} = 0b0001001110;
@@ -7174,7 +7228,7 @@ def J4_cmpgtu_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7199,7 +7253,7 @@ def J4_cmpgtu_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7224,7 +7278,7 @@ def J4_cmpgtu_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010101;
@@ -7247,7 +7301,7 @@ def J4_cmpgtu_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010101;
@@ -7270,7 +7324,7 @@ def J4_cmpgtu_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010101;
@@ -7293,7 +7347,7 @@ def J4_cmpgtu_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010101;
@@ -7316,7 +7370,7 @@ def J4_cmpgtu_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,$Rt32)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7340,7 +7394,7 @@ def J4_cmpgtu_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, IntRegs:$Rt32, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,$Rt32)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_15140689, PredRel {
+tc_580a779c, TypeNCJ>, Enc_c9a18e, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7364,7 +7418,7 @@ def J4_cmpgtu_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001010100;
@@ -7386,7 +7440,7 @@ def J4_cmpgtu_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,$Rt16); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b10;
let Inst{31-22} = 0b0001010100;
@@ -7408,7 +7462,7 @@ def J4_cmpgtu_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-22} = 0b0001010100;
@@ -7430,7 +7484,7 @@ def J4_cmpgtu_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, GeneralSubRegs:$Rt16, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,$Rt16); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_14264243, PredRel {
+tc_92d1833c, TypeCJ>, Enc_6a5972, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b11;
let Inst{31-22} = 0b0001010100;
@@ -7452,7 +7506,7 @@ def J4_cmpgtui_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7477,7 +7531,7 @@ def J4_cmpgtui_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (!cmp.gtu($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7502,7 +7556,7 @@ def J4_cmpgtui_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000101;
@@ -7525,7 +7579,7 @@ def J4_cmpgtui_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000101;
@@ -7548,7 +7602,7 @@ def J4_cmpgtui_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001101;
@@ -7571,7 +7625,7 @@ def J4_cmpgtui_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001101;
@@ -7594,7 +7648,7 @@ def J4_cmpgtui_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,#$II)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7618,7 +7672,7 @@ def J4_cmpgtui_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, u5_0Imm:$II, b30_2Imm:$Ii),
"if (cmp.gtu($Ns8.new,#$II)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_4397470, PredRel {
+tc_09faec3b, TypeNCJ>, Enc_eafd18, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7642,7 +7696,7 @@ def J4_cmpgtui_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001000100;
@@ -7664,7 +7718,7 @@ def J4_cmpgtui_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p0 = cmp.gtu($Rs16,#$II); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001000100;
@@ -7686,7 +7740,7 @@ def J4_cmpgtui_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-22} = 0b0001001100;
@@ -7708,7 +7762,7 @@ def J4_cmpgtui_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u5_0Imm:$II, b30_2Imm:$Ii),
"p1 = cmp.gtu($Rs16,#$II); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_7305764, PredRel {
+tc_d108a090, TypeCJ>, Enc_14d27a, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-22} = 0b0001001100;
@@ -7730,7 +7784,7 @@ def J4_cmplt_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gt($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7755,7 +7809,7 @@ def J4_cmplt_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gt($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7780,7 +7834,7 @@ def J4_cmplt_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gt($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7804,7 +7858,7 @@ def J4_cmplt_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gt($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7828,7 +7882,7 @@ def J4_cmpltu_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gtu($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7853,7 +7907,7 @@ def J4_cmpltu_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!cmp.gtu($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7878,7 +7932,7 @@ def J4_cmpltu_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gtu($Rt32,$Ns8.new)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b0;
let Inst{19-19} = 0b0;
@@ -7902,7 +7956,7 @@ def J4_cmpltu_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Rt32, IntRegs:$Ns8, b30_2Imm:$Ii),
"if (cmp.gtu($Rt32,$Ns8.new)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_6730375, PredRel {
+tc_3e61d314, TypeNCJ>, Enc_5de85f, PredRel {
let Inst{0-0} = 0b0;
let Inst{13-13} = 0b1;
let Inst{19-19} = 0b0;
@@ -7926,7 +7980,7 @@ def J4_hintjumpr : HInst<
(outs),
(ins IntRegs:$Rs32),
"hintjr($Rs32)",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_b08b653e, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010010101;
let isTerminator = 1;
@@ -7938,7 +7992,7 @@ def J4_jumpseti : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u6_0Imm:$II, b30_2Imm:$Ii),
"$Rd16 = #$II ; jump $Ii",
-COMPOUND, TypeCJ>, Enc_4834775 {
+tc_1e062b18, TypeCJ>, Enc_9e4c3f {
let Inst{0-0} = 0b0;
let Inst{31-22} = 0b0001011000;
let hasNewValue = 1;
@@ -7956,7 +8010,7 @@ def J4_jumpsetr : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"$Rd16 = $Rs16 ; jump $Ii",
-COMPOUND, TypeCJ>, Enc_2639299 {
+tc_1e062b18, TypeCJ>, Enc_66bce1 {
let Inst{0-0} = 0b0;
let Inst{13-12} = 0b00;
let Inst{31-22} = 0b0001011100;
@@ -7975,7 +8029,7 @@ def J4_tstbit0_f_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!tstbit($Ns8.new,#0)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -7999,7 +8053,7 @@ def J4_tstbit0_f_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (!tstbit($Ns8.new,#0)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -8023,7 +8077,7 @@ def J4_tstbit0_fp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (!p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001000111;
@@ -8045,7 +8099,7 @@ def J4_tstbit0_fp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (!p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001000111;
@@ -8067,7 +8121,7 @@ def J4_tstbit0_fp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (!p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001001111;
@@ -8089,7 +8143,7 @@ def J4_tstbit0_fp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (!p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001001111;
@@ -8111,7 +8165,7 @@ def J4_tstbit0_t_jumpnv_nt : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (tstbit($Ns8.new,#0)) jump:nt $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000000;
let Inst{19-19} = 0b0;
@@ -8134,7 +8188,7 @@ def J4_tstbit0_t_jumpnv_t : HInst<
(outs),
(ins IntRegs:$Ns8, b30_2Imm:$Ii),
"if (tstbit($Ns8.new,#0)) jump:t $Ii",
-NCJ_tc_3or4stall_SLOT0, TypeNCJ>, Enc_1898420 {
+tc_dbe218dd, TypeNCJ>, Enc_69d63b {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100000;
let Inst{19-19} = 0b0;
@@ -8157,7 +8211,7 @@ def J4_tstbit0_tp0_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (p0.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001000110;
@@ -8178,7 +8232,7 @@ def J4_tstbit0_tp0_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p0 = tstbit($Rs16,#0); if (p0.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001000110;
@@ -8199,7 +8253,7 @@ def J4_tstbit0_tp1_jump_nt : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (p1.new) jump:nt $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b000011;
let Inst{31-22} = 0b0001001110;
@@ -8220,7 +8274,7 @@ def J4_tstbit0_tp1_jump_t : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, b30_2Imm:$Ii),
"p1 = tstbit($Rs16,#0); if (p1.new) jump:t $Ii",
-COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>, Enc_12829314 {
+tc_eb07ef6f, TypeCJ>, Enc_ad1c74 {
let Inst{0-0} = 0b0;
let Inst{13-8} = 0b100011;
let Inst{31-22} = 0b0001001110;
@@ -8241,7 +8295,7 @@ def L2_deallocframe : HInst<
(outs),
(ins),
"deallocframe",
-LD_tc_ld_SLOT01, TypeLD>, Enc_0 {
+tc_c1dbc916, TypeLD>, Enc_3a3d62 {
let Inst{4-0} = 0b11110;
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010000000;
@@ -8255,7 +8309,7 @@ def L2_loadalignb_io : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Ryy32 = memb_fifo($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_449439 {
+tc_14da557c, TypeLD>, Enc_a27588 {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8272,7 +8326,7 @@ def L2_loadalignb_pbr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110100;
let accessSize = ByteAccess;
@@ -8283,7 +8337,7 @@ def L2_loadalignb_pci : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_971347 {
+tc_d2a33af5, TypeLD>, Enc_74aef2 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000100;
let addrMode = PostInc;
@@ -8296,7 +8350,7 @@ def L2_loadalignb_pcr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000100;
let addrMode = PostInc;
@@ -8309,7 +8363,7 @@ def L2_loadalignb_pi : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Ryy32 = memb_fifo($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6372758 {
+tc_ae762521, TypeLD>, Enc_6b197f {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010100;
let addrMode = PostInc;
@@ -8321,7 +8375,7 @@ def L2_loadalignb_pr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memb_fifo($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100100;
let addrMode = PostInc;
@@ -8333,7 +8387,7 @@ def L2_loadalignb_zomap : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32),
"$Ryy32 = memb_fifo($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let Constraints = "$Ryy32 = $Ryy32in";
@@ -8342,7 +8396,7 @@ def L2_loadalignh_io : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32, s31_1Imm:$Ii),
"$Ryy32 = memh_fifo($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11930027 {
+tc_14da557c, TypeLD>, Enc_5cd7e9 {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8359,7 +8413,7 @@ def L2_loadalignh_pbr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110010;
let accessSize = HalfWordAccess;
@@ -8370,7 +8424,7 @@ def L2_loadalignh_pci : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_1971351 {
+tc_d2a33af5, TypeLD>, Enc_9e2e1c {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000010;
let addrMode = PostInc;
@@ -8383,7 +8437,7 @@ def L2_loadalignh_pcr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000010;
let addrMode = PostInc;
@@ -8396,7 +8450,7 @@ def L2_loadalignh_pi : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Ryy32 = memh_fifo($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_3372766 {
+tc_ae762521, TypeLD>, Enc_bd1cbc {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010010;
let addrMode = PostInc;
@@ -8408,7 +8462,7 @@ def L2_loadalignh_pr : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Rx32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rx32in, ModRegs:$Mu2),
"$Ryy32 = memh_fifo($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12261611 {
+tc_ae762521, TypeLD>, Enc_1f5d8f {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100010;
let addrMode = PostInc;
@@ -8420,7 +8474,7 @@ def L2_loadalignh_zomap : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rs32),
"$Ryy32 = memh_fifo($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let Constraints = "$Ryy32 = $Ryy32in";
@@ -8429,7 +8483,7 @@ def L2_loadbsw2_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = membh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738 {
+tc_bf6fa601, TypeLD>, Enc_de0214 {
let Inst{24-21} = 0b0001;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8447,7 +8501,7 @@ def L2_loadbsw2_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110001;
let hasNewValue = 1;
@@ -8460,7 +8514,7 @@ def L2_loadbsw2_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000001;
let hasNewValue = 1;
@@ -8475,7 +8529,7 @@ def L2_loadbsw2_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000001;
let hasNewValue = 1;
@@ -8490,7 +8544,7 @@ def L2_loadbsw2_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = membh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009 {
+tc_65dc7cc4, TypeLD>, Enc_152467 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010001;
let hasNewValue = 1;
@@ -8504,7 +8558,7 @@ def L2_loadbsw2_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = membh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100001;
let hasNewValue = 1;
@@ -8518,7 +8572,7 @@ def L2_loadbsw2_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = membh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8528,7 +8582,7 @@ def L2_loadbsw4_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rdd32 = membh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_9852473 {
+tc_bf6fa601, TypeLD>, Enc_2d7491 {
let Inst{24-21} = 0b0111;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8544,7 +8598,7 @@ def L2_loadbsw4_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110111;
let accessSize = WordAccess;
@@ -8555,7 +8609,7 @@ def L2_loadbsw4_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_3931661 {
+tc_3eab77bd, TypeLD>, Enc_70b24b {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000111;
let addrMode = PostInc;
@@ -8568,7 +8622,7 @@ def L2_loadbsw4_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000111;
let addrMode = PostInc;
@@ -8581,7 +8635,7 @@ def L2_loadbsw4_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rdd32 = membh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_8752140 {
+tc_65dc7cc4, TypeLD>, Enc_71f1b4 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010111;
let addrMode = PostInc;
@@ -8593,7 +8647,7 @@ def L2_loadbsw4_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = membh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100111;
let addrMode = PostInc;
@@ -8605,7 +8659,7 @@ def L2_loadbsw4_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = membh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -8613,7 +8667,7 @@ def L2_loadbzw2_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memubh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738 {
+tc_bf6fa601, TypeLD>, Enc_de0214 {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8631,7 +8685,7 @@ def L2_loadbzw2_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110011;
let hasNewValue = 1;
@@ -8644,7 +8698,7 @@ def L2_loadbzw2_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000011;
let hasNewValue = 1;
@@ -8659,7 +8713,7 @@ def L2_loadbzw2_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000011;
let hasNewValue = 1;
@@ -8674,7 +8728,7 @@ def L2_loadbzw2_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memubh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009 {
+tc_65dc7cc4, TypeLD>, Enc_152467 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010011;
let hasNewValue = 1;
@@ -8688,7 +8742,7 @@ def L2_loadbzw2_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memubh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100011;
let hasNewValue = 1;
@@ -8702,7 +8756,7 @@ def L2_loadbzw2_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memubh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8712,7 +8766,7 @@ def L2_loadbzw4_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rdd32 = memubh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_9852473 {
+tc_bf6fa601, TypeLD>, Enc_2d7491 {
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8728,7 +8782,7 @@ def L2_loadbzw4_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011110101;
let accessSize = WordAccess;
@@ -8739,7 +8793,7 @@ def L2_loadbzw4_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_3931661 {
+tc_3eab77bd, TypeLD>, Enc_70b24b {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011000101;
let addrMode = PostInc;
@@ -8752,7 +8806,7 @@ def L2_loadbzw4_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011000101;
let addrMode = PostInc;
@@ -8765,7 +8819,7 @@ def L2_loadbzw4_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rdd32 = memubh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_8752140 {
+tc_65dc7cc4, TypeLD>, Enc_71f1b4 {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011010101;
let addrMode = PostInc;
@@ -8777,7 +8831,7 @@ def L2_loadbzw4_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memubh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011100101;
let addrMode = PostInc;
@@ -8789,7 +8843,7 @@ def L2_loadbzw4_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memubh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -8797,7 +8851,7 @@ def L2_loadrb_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = memb($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14461004, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_211aaa, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -8818,7 +8872,7 @@ def L2_loadrb_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111000;
let hasNewValue = 1;
@@ -8831,7 +8885,7 @@ def L2_loadrb_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_16303398 {
+tc_3eab77bd, TypeLD>, Enc_e0a47a {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001000;
let hasNewValue = 1;
@@ -8846,7 +8900,7 @@ def L2_loadrb_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001000;
let hasNewValue = 1;
@@ -8861,7 +8915,7 @@ def L2_loadrb_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5598813, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_222336, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011000;
let hasNewValue = 1;
@@ -8877,7 +8931,7 @@ def L2_loadrb_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memb($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101000;
let hasNewValue = 1;
@@ -8891,7 +8945,7 @@ def L2_loadrb_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -8901,7 +8955,7 @@ def L2_loadrbgp : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memb(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -8920,7 +8974,7 @@ def L2_loadrd_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, s29_3Imm:$Ii),
"$Rdd32 = memd($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_163381, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_fa3ba4, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b10010;
let addrMode = BaseImmOffset;
@@ -8939,7 +8993,7 @@ def L2_loadrd_pbr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111110;
let accessSize = DoubleWordAccess;
@@ -8950,7 +9004,7 @@ def L2_loadrd_pci : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_931653 {
+tc_3eab77bd, TypeLD>, Enc_b05839 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001110;
let addrMode = PostInc;
@@ -8963,7 +9017,7 @@ def L2_loadrd_pcr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001110;
let addrMode = PostInc;
@@ -8976,7 +9030,7 @@ def L2_loadrd_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii),
"$Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_9752128, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_5bdd42, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011110;
let addrMode = PostInc;
@@ -8990,7 +9044,7 @@ def L2_loadrd_pr : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rdd32 = memd($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_2901241 {
+tc_65dc7cc4, TypeLD>, Enc_7eee72 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101110;
let addrMode = PostInc;
@@ -9002,7 +9056,7 @@ def L2_loadrd_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9010,7 +9064,7 @@ def L2_loadrdgp : HInst<
(outs DoubleRegs:$Rdd32),
(ins u29_3Imm:$Ii),
"$Rdd32 = memd(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4975051, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_509701, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b01001;
let accessSize = DoubleWordAccess;
@@ -9027,7 +9081,7 @@ def L2_loadrh_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_de0214, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9048,7 +9102,7 @@ def L2_loadrh_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111010;
let hasNewValue = 1;
@@ -9061,7 +9115,7 @@ def L2_loadrh_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001010;
let hasNewValue = 1;
@@ -9076,7 +9130,7 @@ def L2_loadrh_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001010;
let hasNewValue = 1;
@@ -9091,7 +9145,7 @@ def L2_loadrh_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_152467, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011010;
let hasNewValue = 1;
@@ -9107,7 +9161,7 @@ def L2_loadrh_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101010;
let hasNewValue = 1;
@@ -9121,7 +9175,7 @@ def L2_loadrh_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9131,7 +9185,7 @@ def L2_loadrhgp : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memh(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9150,7 +9204,7 @@ def L2_loadri_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s30_2Imm:$Ii),
"$Rd32 = memw($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_8990840, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_2a3787, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9171,7 +9225,7 @@ def L2_loadri_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111100;
let hasNewValue = 1;
@@ -9184,7 +9238,7 @@ def L2_loadri_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14303394 {
+tc_3eab77bd, TypeLD>, Enc_27fd0e {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001100;
let hasNewValue = 1;
@@ -9199,7 +9253,7 @@ def L2_loadri_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001100;
let hasNewValue = 1;
@@ -9214,7 +9268,7 @@ def L2_loadri_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii),
"$Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_16376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_3d920a, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011100;
let hasNewValue = 1;
@@ -9230,7 +9284,7 @@ def L2_loadri_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memw($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101100;
let hasNewValue = 1;
@@ -9244,7 +9298,7 @@ def L2_loadri_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9254,7 +9308,7 @@ def L2_loadrigp : HInst<
(outs IntRegs:$Rd32),
(ins u30_2Imm:$Ii),
"$Rd32 = memw(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_8814718, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_4f4ed7, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9273,7 +9327,7 @@ def L2_loadrub_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rd32 = memub($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_14461004, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_211aaa, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9294,7 +9348,7 @@ def L2_loadrub_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111001;
let hasNewValue = 1;
@@ -9307,7 +9361,7 @@ def L2_loadrub_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_16303398 {
+tc_3eab77bd, TypeLD>, Enc_e0a47a {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001001;
let hasNewValue = 1;
@@ -9322,7 +9376,7 @@ def L2_loadrub_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001001;
let hasNewValue = 1;
@@ -9337,7 +9391,7 @@ def L2_loadrub_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii),
"$Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5598813, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_222336, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011001;
let hasNewValue = 1;
@@ -9353,7 +9407,7 @@ def L2_loadrub_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memub($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101001;
let hasNewValue = 1;
@@ -9367,7 +9421,7 @@ def L2_loadrub_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9377,7 +9431,7 @@ def L2_loadrubgp : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memub(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9396,7 +9450,7 @@ def L2_loadruh_io : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s31_1Imm:$Ii),
"$Rd32 = memuh($Rs32+#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15275738, AddrModeRel {
+tc_bf6fa601, TypeLD>, Enc_de0214, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b10010;
let hasNewValue = 1;
@@ -9417,7 +9471,7 @@ def L2_loadruh_pbr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++$Mu2:brev)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011111011;
let hasNewValue = 1;
@@ -9430,7 +9484,7 @@ def L2_loadruh_pci : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++#$Ii:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13303422 {
+tc_3eab77bd, TypeLD>, Enc_e83554 {
let Inst{12-9} = 0b0000;
let Inst{31-21} = 0b10011001011;
let hasNewValue = 1;
@@ -9445,7 +9499,7 @@ def L2_loadruh_pcr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++I:circ($Mu2))",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00010000;
let Inst{31-21} = 0b10011001011;
let hasNewValue = 1;
@@ -9460,7 +9514,7 @@ def L2_loadruh_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii),
"$Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_15376009, PredNewRel {
+tc_65dc7cc4, TypeLD>, Enc_152467, PredNewRel {
let Inst{13-9} = 0b00000;
let Inst{31-21} = 0b10011011011;
let hasNewValue = 1;
@@ -9476,7 +9530,7 @@ def L2_loadruh_pr : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Rd32 = memuh($Rx32++$Mu2)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_48594 {
+tc_65dc7cc4, TypeLD>, Enc_74d4e5 {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b10011101011;
let hasNewValue = 1;
@@ -9490,7 +9544,7 @@ def L2_loadruh_zomap : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_bf6fa601, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9500,7 +9554,7 @@ def L2_loadruhgp : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memuh(gp+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
@@ -9519,20 +9573,20 @@ def L2_loadw_locked : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = memw_locked($Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4075554 {
+tc_29c14515, TypeLD>, Enc_5e2823 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010010000;
let hasNewValue = 1;
let opNewValue = 0;
let accessSize = WordAccess;
-let isSoloAX = 1;
let mayLoad = 1;
+let isSoloAX = 1;
}
def L2_ploadrbf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101000;
let isPredicated = 1;
@@ -9554,7 +9608,7 @@ def L2_ploadrbf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9571,7 +9625,7 @@ def L2_ploadrbf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9581,7 +9635,7 @@ def L2_ploadrbfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111000;
let isPredicated = 1;
@@ -9604,7 +9658,7 @@ def L2_ploadrbfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9622,7 +9676,7 @@ def L2_ploadrbfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9632,7 +9686,7 @@ def L2_ploadrbt_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001000;
let isPredicated = 1;
@@ -9653,7 +9707,7 @@ def L2_ploadrbt_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9669,7 +9723,7 @@ def L2_ploadrbt_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9679,7 +9733,7 @@ def L2_ploadrbtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011000;
let isPredicated = 1;
@@ -9701,7 +9755,7 @@ def L2_ploadrbtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011000;
let isPredicated = 1;
@@ -9718,7 +9772,7 @@ def L2_ploadrbtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memb($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9728,7 +9782,7 @@ def L2_ploadrdf_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101110;
let isPredicated = 1;
@@ -9748,7 +9802,7 @@ def L2_ploadrdf_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_ae762521, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9763,7 +9817,7 @@ def L2_ploadrdf_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9771,7 +9825,7 @@ def L2_ploadrdfnew_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111110;
let isPredicated = 1;
@@ -9792,7 +9846,7 @@ def L2_ploadrdfnew_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_e578178f, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9808,7 +9862,7 @@ def L2_ploadrdfnew_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9816,7 +9870,7 @@ def L2_ploadrdt_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if ($Pt4) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001110;
let isPredicated = 1;
@@ -9835,7 +9889,7 @@ def L2_ploadrdt_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if ($Pt4) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_ae762521, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9849,7 +9903,7 @@ def L2_ploadrdt_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9857,7 +9911,7 @@ def L2_ploadrdtnew_io : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u29_3Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_677558, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_acd6ed, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011110;
let isPredicated = 1;
@@ -9877,7 +9931,7 @@ def L2_ploadrdtnew_pi : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_3Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_5611087, PredNewRel {
+tc_e578178f, TypeLD>, Enc_9d1247, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011110;
let isPredicated = 1;
@@ -9892,7 +9946,7 @@ def L2_ploadrdtnew_zomap : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rdd32 = memd($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -9900,7 +9954,7 @@ def L2_ploadrhf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101010;
let isPredicated = 1;
@@ -9922,7 +9976,7 @@ def L2_ploadrhf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -9939,7 +9993,7 @@ def L2_ploadrhf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -9949,7 +10003,7 @@ def L2_ploadrhfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111010;
let isPredicated = 1;
@@ -9972,7 +10026,7 @@ def L2_ploadrhfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -9990,7 +10044,7 @@ def L2_ploadrhfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10000,7 +10054,7 @@ def L2_ploadrht_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001010;
let isPredicated = 1;
@@ -10021,7 +10075,7 @@ def L2_ploadrht_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -10037,7 +10091,7 @@ def L2_ploadrht_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10047,7 +10101,7 @@ def L2_ploadrhtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011010;
let isPredicated = 1;
@@ -10069,7 +10123,7 @@ def L2_ploadrhtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011010;
let isPredicated = 1;
@@ -10086,7 +10140,7 @@ def L2_ploadrhtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10096,7 +10150,7 @@ def L2_ploadrif_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if (!$Pt4) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101100;
let isPredicated = 1;
@@ -10118,7 +10172,7 @@ def L2_ploadrif_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if (!$Pt4) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10135,7 +10189,7 @@ def L2_ploadrif_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10145,7 +10199,7 @@ def L2_ploadrifnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111100;
let isPredicated = 1;
@@ -10168,7 +10222,7 @@ def L2_ploadrifnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10186,7 +10240,7 @@ def L2_ploadrifnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10196,7 +10250,7 @@ def L2_ploadrit_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if ($Pt4) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001100;
let isPredicated = 1;
@@ -10217,7 +10271,7 @@ def L2_ploadrit_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if ($Pt4) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10233,7 +10287,7 @@ def L2_ploadrit_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10243,7 +10297,7 @@ def L2_ploadritnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u30_2Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_2835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_f82eaf, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011100;
let isPredicated = 1;
@@ -10265,7 +10319,7 @@ def L2_ploadritnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_2Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_6212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_b97f71, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011100;
let isPredicated = 1;
@@ -10282,7 +10336,7 @@ def L2_ploadritnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memw($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10292,7 +10346,7 @@ def L2_ploadrubf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101001;
let isPredicated = 1;
@@ -10314,7 +10368,7 @@ def L2_ploadrubf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10331,7 +10385,7 @@ def L2_ploadrubf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10341,7 +10395,7 @@ def L2_ploadrubfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111001;
let isPredicated = 1;
@@ -10364,7 +10418,7 @@ def L2_ploadrubfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10382,7 +10436,7 @@ def L2_ploadrubfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10392,7 +10446,7 @@ def L2_ploadrubt_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001001;
let isPredicated = 1;
@@ -10413,7 +10467,7 @@ def L2_ploadrubt_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_ae762521, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10429,7 +10483,7 @@ def L2_ploadrubt_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10439,7 +10493,7 @@ def L2_ploadrubtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4835423, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a21d47, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011001;
let isPredicated = 1;
@@ -10461,7 +10515,7 @@ def L2_ploadrubtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_12212978, PredNewRel {
+tc_e578178f, TypeLD>, Enc_f4413a, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011001;
let isPredicated = 1;
@@ -10478,7 +10532,7 @@ def L2_ploadrubtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memub($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10488,7 +10542,7 @@ def L2_ploadruhf_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000101011;
let isPredicated = 1;
@@ -10510,7 +10564,7 @@ def L2_ploadruhf_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10527,7 +10581,7 @@ def L2_ploadruhf_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10537,7 +10591,7 @@ def L2_ploadruhfnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000111011;
let isPredicated = 1;
@@ -10560,7 +10614,7 @@ def L2_ploadruhfnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10578,7 +10632,7 @@ def L2_ploadruhfnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if (!$Pt4.new) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10588,7 +10642,7 @@ def L2_ploadruht_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_14da557c, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000001011;
let isPredicated = 1;
@@ -10609,7 +10663,7 @@ def L2_ploadruht_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_ae762521, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10625,7 +10679,7 @@ def L2_ploadruht_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_14da557c, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10635,7 +10689,7 @@ def L2_ploadruhtnew_io : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32, u31_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh($Rs32+#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1835415, AddrModeRel {
+tc_65dc7cc4, TypeV2LDST>, Enc_a198f6, AddrModeRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b01000011011;
let isPredicated = 1;
@@ -10657,7 +10711,7 @@ def L2_ploadruhtnew_pi : HInst<
(outs IntRegs:$Rd32, IntRegs:$Rx32),
(ins PredRegs:$Pt4, IntRegs:$Rx32in, s4_1Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh($Rx32++#$Ii)",
-LD_tc_ld_pi_SLOT01, TypeLD>, Enc_7212930, PredNewRel {
+tc_e578178f, TypeLD>, Enc_733b27, PredNewRel {
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011011011;
let isPredicated = 1;
@@ -10674,7 +10728,7 @@ def L2_ploadruhtnew_zomap : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, IntRegs:$Rs32),
"if ($Pt4.new) $Rd32 = memuh($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_65dc7cc4, TypeMAPPING> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -10684,14 +10738,14 @@ def L4_add_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10702,7 +10756,7 @@ def L4_add_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10710,14 +10764,14 @@ def L4_add_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10728,7 +10782,7 @@ def L4_add_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10736,14 +10790,14 @@ def L4_add_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) += $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10754,7 +10808,7 @@ def L4_add_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) += $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10762,14 +10816,14 @@ def L4_and_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10780,7 +10834,7 @@ def L4_and_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10788,14 +10842,14 @@ def L4_and_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10806,7 +10860,7 @@ def L4_and_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10814,14 +10868,14 @@ def L4_and_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) &= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10832,7 +10886,7 @@ def L4_and_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) &= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10840,14 +10894,14 @@ def L4_iadd_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10858,7 +10912,7 @@ def L4_iadd_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10866,14 +10920,14 @@ def L4_iadd_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10884,7 +10938,7 @@ def L4_iadd_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10892,14 +10946,14 @@ def L4_iadd_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) += #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b00;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10910,7 +10964,7 @@ def L4_iadd_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) += #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10918,14 +10972,14 @@ def L4_iand_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10936,7 +10990,7 @@ def L4_iand_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10944,14 +10998,14 @@ def L4_iand_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10962,7 +11016,7 @@ def L4_iand_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10970,14 +11024,14 @@ def L4_iand_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) = clrbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -10988,7 +11042,7 @@ def L4_iand_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) = clrbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -10996,14 +11050,14 @@ def L4_ior_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11014,7 +11068,7 @@ def L4_ior_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11022,14 +11076,14 @@ def L4_ior_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11040,7 +11094,7 @@ def L4_ior_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11048,14 +11102,14 @@ def L4_ior_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) = setbit(#$II)",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11066,7 +11120,7 @@ def L4_ior_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) = setbit(#$II)",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11074,14 +11128,14 @@ def L4_isub_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, u5_0Imm:$II),
"memb($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_6773159 {
+tc_da79106e, TypeV4LDST>, Enc_46c951 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11092,7 +11146,7 @@ def L4_isub_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memb($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11100,14 +11154,14 @@ def L4_isub_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, u5_0Imm:$II),
"memh($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9773167 {
+tc_da79106e, TypeV4LDST>, Enc_e66a97 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11118,7 +11172,7 @@ def L4_isub_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memh($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11126,14 +11180,14 @@ def L4_isub_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, u5_0Imm:$II),
"memw($Rs32+#$Ii) -= #$II",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8773155 {
+tc_da79106e, TypeV4LDST>, Enc_84b2cd {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111111010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11144,7 +11198,7 @@ def L4_isub_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, u5_0Imm:$II),
"memw($Rs32) -= #$II",
-PSEUDO, TypeMAPPING> {
+tc_da79106e, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11152,7 +11206,7 @@ def L4_loadalignb_ap : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Re32),
(ins DoubleRegs:$Ryy32in, u32_0Imm:$II),
"$Ryy32 = memb_fifo($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11047413 {
+tc_261d9b78, TypeLD>, Enc_f394d3 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010100;
@@ -11160,8 +11214,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 3;
@@ -11174,13 +11228,13 @@ def L4_loadalignb_ur : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Ryy32 = memb_fifo($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_7303598 {
+tc_baccf077, TypeLD>, Enc_04c959 {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100100;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11194,7 +11248,7 @@ def L4_loadalignh_ap : HInst<
(outs DoubleRegs:$Ryy32, IntRegs:$Re32),
(ins DoubleRegs:$Ryy32in, u32_0Imm:$II),
"$Ryy32 = memh_fifo($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_11047413 {
+tc_261d9b78, TypeLD>, Enc_f394d3 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010010;
@@ -11202,8 +11256,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 3;
@@ -11216,13 +11270,13 @@ def L4_loadalignh_ur : HInst<
(outs DoubleRegs:$Ryy32),
(ins DoubleRegs:$Ryy32in, IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Ryy32 = memh_fifo($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_7303598 {
+tc_baccf077, TypeLD>, Enc_04c959 {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100010;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11236,7 +11290,7 @@ def L4_loadbsw2_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = membh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010001;
@@ -11246,8 +11300,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11259,15 +11313,15 @@ def L4_loadbsw2_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = membh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163 {
+tc_7d9a56cd, TypeLD>, Enc_4f677b {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11280,7 +11334,7 @@ def L4_loadbsw4_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = membh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010111;
@@ -11288,8 +11342,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11301,13 +11355,13 @@ def L4_loadbsw4_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = membh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416 {
+tc_7d9a56cd, TypeLD>, Enc_6185fe {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100111;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11320,7 +11374,7 @@ def L4_loadbzw2_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memubh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010011;
@@ -11330,8 +11384,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11343,15 +11397,15 @@ def L4_loadbzw2_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memubh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163 {
+tc_7d9a56cd, TypeLD>, Enc_4f677b {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100011;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11364,7 +11418,7 @@ def L4_loadbzw4_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = memubh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011010101;
@@ -11372,8 +11426,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11385,13 +11439,13 @@ def L4_loadbzw4_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = memubh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416 {
+tc_7d9a56cd, TypeLD>, Enc_6185fe {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011100101;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let InputType = "imm";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -11404,18 +11458,18 @@ def L4_loadd_locked : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = memd_locked($Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4030179 {
+tc_29c14515, TypeLD>, Enc_3a3d62 {
let Inst{13-5} = 0b010000000;
let Inst{31-21} = 0b10010010000;
let accessSize = DoubleWordAccess;
-let isSoloAX = 1;
let mayLoad = 1;
+let isSoloAX = 1;
}
def L4_loadrb_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memb($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011000;
@@ -11425,8 +11479,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11438,7 +11492,7 @@ def L4_loadrb_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010000;
let hasNewValue = 1;
@@ -11455,15 +11509,15 @@ def L4_loadrb_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memb($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101000;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11477,7 +11531,7 @@ def L4_loadrd_ap : HInst<
(outs DoubleRegs:$Rdd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rdd32 = memd($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_877823 {
+tc_b5f5a094, TypeLD>, Enc_7fa7f6 {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011110;
@@ -11485,8 +11539,8 @@ let hasNewValue = 1;
let opNewValue = 1;
let addrMode = AbsoluteSet;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11498,7 +11552,7 @@ def L4_loadrd_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7581852, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_84bff1, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010110;
let addrMode = BaseRegOffset;
@@ -11513,13 +11567,13 @@ def L4_loadrd_ur : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rdd32 = memd($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_5582416, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_6185fe, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101110;
let addrMode = BaseLongOffset;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11533,7 +11587,7 @@ def L4_loadrh_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011010;
@@ -11543,8 +11597,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11556,7 +11610,7 @@ def L4_loadrh_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010010;
let hasNewValue = 1;
@@ -11573,15 +11627,15 @@ def L4_loadrh_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101010;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11595,7 +11649,7 @@ def L4_loadri_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memw($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011100;
@@ -11605,8 +11659,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11618,7 +11672,7 @@ def L4_loadri_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010100;
let hasNewValue = 1;
@@ -11635,15 +11689,15 @@ def L4_loadri_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memw($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101100;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11657,7 +11711,7 @@ def L4_loadrub_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memub($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011001;
@@ -11667,8 +11721,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11680,7 +11734,7 @@ def L4_loadrub_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010001;
let hasNewValue = 1;
@@ -11697,15 +11751,15 @@ def L4_loadrub_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memub($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11719,7 +11773,7 @@ def L4_loadruh_ap : HInst<
(outs IntRegs:$Rd32, IntRegs:$Re32),
(ins u32_0Imm:$II),
"$Rd32 = memuh($Re32=#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_12616482 {
+tc_b5f5a094, TypeLD>, Enc_323f2d {
let Inst{7-7} = 0b0;
let Inst{13-12} = 0b01;
let Inst{31-21} = 0b10011011011;
@@ -11729,8 +11783,8 @@ let hasNewValue2 = 1;
let opNewValue2 = 1;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
let opExtendable = 2;
@@ -11742,7 +11796,7 @@ def L4_loadruh_rr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_10721363, AddrModeRel, ImmRegShl {
+tc_5625c6c1, TypeLD>, Enc_da664b, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111010011;
let hasNewValue = 1;
@@ -11759,15 +11813,15 @@ def L4_loadruh_ur : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, u2_0Imm:$Ii, u32_0Imm:$II),
"$Rd32 = memuh($Rt32<<#$Ii+#$II)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_486163, AddrModeRel, ImmRegShl {
+tc_7d9a56cd, TypeLD>, Enc_4f677b, AddrModeRel, ImmRegShl {
let Inst{12-12} = 0b1;
let Inst{31-21} = 0b10011101011;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let InputType = "imm";
let DecoderNamespace = "MustExtend";
@@ -11781,14 +11835,14 @@ def L4_or_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11799,7 +11853,7 @@ def L4_or_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11807,14 +11861,14 @@ def L4_or_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11825,7 +11879,7 @@ def L4_or_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11833,14 +11887,14 @@ def L4_or_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) |= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -11851,7 +11905,7 @@ def L4_or_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) |= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -11859,7 +11913,7 @@ def L4_ploadrbf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111000;
@@ -11869,8 +11923,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11884,7 +11938,7 @@ def L4_ploadrbf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -11901,7 +11955,7 @@ def L4_ploadrbfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111000;
@@ -11911,9 +11965,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11927,7 +11981,7 @@ def L4_ploadrbfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -11945,7 +11999,7 @@ def L4_ploadrbt_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111000;
@@ -11954,8 +12008,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -11969,7 +12023,7 @@ def L4_ploadrbt_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000000;
let isPredicated = 1;
let hasNewValue = 1;
@@ -11985,7 +12039,7 @@ def L4_ploadrbtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memb(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111000;
@@ -11994,9 +12048,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let DecoderNamespace = "MustExtend";
@@ -12010,7 +12064,7 @@ def L4_ploadrbtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memb($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010000;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12027,7 +12081,7 @@ def L4_ploadrdf_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111110;
@@ -12035,8 +12089,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12050,7 +12104,7 @@ def L4_ploadrdf_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110001110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12065,7 +12119,7 @@ def L4_ploadrdfnew_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111110;
@@ -12073,9 +12127,9 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12089,7 +12143,7 @@ def L4_ploadrdfnew_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110011110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12105,15 +12159,15 @@ def L4_ploadrdt_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111110;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12127,7 +12181,7 @@ def L4_ploadrdt_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110000110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -12141,16 +12195,16 @@ def L4_ploadrdtnew_abs : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rdd32 = memd(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_15182416, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2a7b91, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111110;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let DecoderNamespace = "MustExtend";
@@ -12164,7 +12218,7 @@ def L4_ploadrdtnew_rr : HInst<
(outs DoubleRegs:$Rdd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rdd32 = memd($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_7254313, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_98c0b8, AddrModeRel {
let Inst{31-21} = 0b00110010110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -12179,7 +12233,7 @@ def L4_ploadrhf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111010;
@@ -12189,8 +12243,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12204,7 +12258,7 @@ def L4_ploadrhf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12221,7 +12275,7 @@ def L4_ploadrhfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111010;
@@ -12231,9 +12285,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12247,7 +12301,7 @@ def L4_ploadrhfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12265,7 +12319,7 @@ def L4_ploadrht_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111010;
@@ -12274,8 +12328,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12289,7 +12343,7 @@ def L4_ploadrht_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000010;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12305,7 +12359,7 @@ def L4_ploadrhtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111010;
@@ -12314,9 +12368,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let DecoderNamespace = "MustExtend";
@@ -12330,7 +12384,7 @@ def L4_ploadrhtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010010;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12347,7 +12401,7 @@ def L4_ploadrif_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111100;
@@ -12357,8 +12411,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12372,7 +12426,7 @@ def L4_ploadrif_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12389,7 +12443,7 @@ def L4_ploadrifnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111100;
@@ -12399,9 +12453,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12415,7 +12469,7 @@ def L4_ploadrifnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12433,7 +12487,7 @@ def L4_ploadrit_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111100;
@@ -12442,8 +12496,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12457,7 +12511,7 @@ def L4_ploadrit_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12473,7 +12527,7 @@ def L4_ploadritnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memw(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111100;
@@ -12482,9 +12536,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let DecoderNamespace = "MustExtend";
@@ -12498,7 +12552,7 @@ def L4_ploadritnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memw($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12515,7 +12569,7 @@ def L4_ploadrubf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111001;
@@ -12525,8 +12579,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12540,7 +12594,7 @@ def L4_ploadrubf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001001;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12557,7 +12611,7 @@ def L4_ploadrubfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111001;
@@ -12567,9 +12621,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12583,7 +12637,7 @@ def L4_ploadrubfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011001;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12601,7 +12655,7 @@ def L4_ploadrubt_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111001;
@@ -12610,8 +12664,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12625,7 +12679,7 @@ def L4_ploadrubt_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000001;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12641,7 +12695,7 @@ def L4_ploadrubtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memub(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111001;
@@ -12650,9 +12704,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let DecoderNamespace = "MustExtend";
@@ -12666,7 +12720,7 @@ def L4_ploadrubtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memub($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010001;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12683,7 +12737,7 @@ def L4_ploadruhf_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b101;
let Inst{31-21} = 0b10011111011;
@@ -12693,8 +12747,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12708,7 +12762,7 @@ def L4_ploadruhf_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110001011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12725,7 +12779,7 @@ def L4_ploadruhfnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if (!$Pt4.new) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b111;
let Inst{31-21} = 0b10011111011;
@@ -12735,9 +12789,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12751,7 +12805,7 @@ def L4_ploadruhfnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if (!$Pv4.new) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110011011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -12769,7 +12823,7 @@ def L4_ploadruht_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_136c4786, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b100;
let Inst{31-21} = 0b10011111011;
@@ -12778,8 +12832,8 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12793,7 +12847,7 @@ def L4_ploadruht_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_9dafb7d3, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110000011;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12809,7 +12863,7 @@ def L4_ploadruhtnew_abs : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pt4, u32_0Imm:$Ii),
"if ($Pt4.new) $Rd32 = memuh(#$Ii)",
-LD_tc_ld_SLOT01, TypeLD>, Enc_13344657, AddrModeRel {
+tc_b5f5a094, TypeLD>, Enc_2301d6, AddrModeRel {
let Inst{7-5} = 0b100;
let Inst{13-11} = 0b110;
let Inst{31-21} = 0b10011111011;
@@ -12818,9 +12872,9 @@ let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let DecoderNamespace = "MustExtend";
@@ -12834,7 +12888,7 @@ def L4_ploadruhtnew_rr : HInst<
(outs IntRegs:$Rd32),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32, u2_0Imm:$Ii),
"if ($Pv4.new) $Rd32 = memuh($Rs32+$Rt32<<#$Ii)",
-V4LDST_tc_ld_SLOT01, TypeLD>, Enc_1793896, AddrModeRel {
+tc_128719e8, TypeLD>, Enc_2e1979, AddrModeRel {
let Inst{31-21} = 0b00110010011;
let isPredicated = 1;
let hasNewValue = 1;
@@ -12851,7 +12905,7 @@ def L4_return : HInst<
(outs),
(ins),
"dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_0, PredNewRel {
+tc_dcfee7ae, TypeLD>, Enc_3a3d62, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10010110000;
@@ -12873,7 +12927,7 @@ def L4_return_f : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4) dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_9ce7a5ab, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1100;
@@ -12885,8 +12939,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12896,7 +12950,7 @@ def L4_return_fnew_pnt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4.new) dealloc_return:nt",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1010;
@@ -12908,9 +12962,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12920,7 +12974,7 @@ def L4_return_fnew_pt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if (!$Pv4.new) dealloc_return:t",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b1110;
@@ -12932,9 +12986,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12944,7 +12998,7 @@ def L4_return_t : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4) dealloc_return",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_9ce7a5ab, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0100;
@@ -12955,8 +13009,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12966,7 +13020,7 @@ def L4_return_tnew_pnt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4.new) dealloc_return:nt",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0010;
@@ -12977,9 +13031,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -12989,7 +13043,7 @@ def L4_return_tnew_pt : HInst<
(outs),
(ins PredRegs:$Pv4),
"if ($Pv4.new) dealloc_return:t",
-LD_tc_3or4stall_SLOT0, TypeLD>, Enc_12711252, PredNewRel {
+tc_3993c58b, TypeLD>, Enc_b7fad3, PredNewRel {
let Inst{4-0} = 0b11110;
let Inst{7-5} = 0b000;
let Inst{13-10} = 0b0110;
@@ -13000,9 +13054,9 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R29, R30, R31];
let BaseOpcode = "L4_return";
@@ -13012,14 +13066,14 @@ def L4_sub_memopb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_11849200 {
+tc_a9c993d9, TypeV4LDST>, Enc_d44e31 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13030,7 +13084,7 @@ def L4_sub_memopb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13038,14 +13092,14 @@ def L4_sub_memoph_io : HInst<
(outs),
(ins IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_8849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_163a3c {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13056,7 +13110,7 @@ def L4_sub_memoph_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13064,14 +13118,14 @@ def L4_sub_memopw_io : HInst<
(outs),
(ins IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) -= $Rt32",
-V4LDST_tc_st_SLOT0, TypeV4LDST>, Enc_9849208 {
+tc_a9c993d9, TypeV4LDST>, Enc_226535 {
let Inst{6-5} = 0b01;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00111110010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
-let mayStore = 1;
let mayLoad = 1;
+let mayStore = 1;
let isExtendable = 1;
let opExtendable = 1;
let isExtentSigned = 0;
@@ -13082,7 +13136,7 @@ def L4_sub_memopw_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) -= $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_a9c993d9, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -13090,7 +13144,7 @@ def M2_acci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += add($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889, ImmRegRel {
+tc_c0cd91a8, TypeM>, Enc_2ae154, ImmRegRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -13105,7 +13159,7 @@ def M2_accii : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 += add($Rs32,#$Ii)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_11522288, ImmRegRel {
+tc_c0cd91a8, TypeM>, Enc_c90aca, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100010000;
let hasNewValue = 1;
@@ -13124,7 +13178,7 @@ def M2_cmaci_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpyi($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13135,7 +13189,7 @@ def M2_cmacr_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpyr($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13146,7 +13200,7 @@ def M2_cmacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13158,7 +13212,7 @@ def M2_cmacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -13170,7 +13224,7 @@ def M2_cmacsc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13182,7 +13236,7 @@ def M2_cmacsc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -13194,7 +13248,7 @@ def M2_cmpyi_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpyi($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13204,7 +13258,7 @@ def M2_cmpyr_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpyr($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13214,7 +13268,7 @@ def M2_cmpyrs_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -13227,7 +13281,7 @@ def M2_cmpyrs_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13240,7 +13294,7 @@ def M2_cmpyrsc_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32*):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101011;
@@ -13253,7 +13307,7 @@ def M2_cmpyrsc_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = cmpy($Rs32,$Rt32*):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -13266,7 +13320,7 @@ def M2_cmpys_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13277,7 +13331,7 @@ def M2_cmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -13288,7 +13342,7 @@ def M2_cmpysc_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -13299,7 +13353,7 @@ def M2_cmpysc_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101110;
@@ -13310,7 +13364,7 @@ def M2_cnacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13322,7 +13376,7 @@ def M2_cnacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -13334,7 +13388,7 @@ def M2_cnacsc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32*):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13346,7 +13400,7 @@ def M2_cnacsc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= cmpy($Rs32,$Rt32*):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -13358,7 +13412,7 @@ def M2_dpmpyss_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -13369,7 +13423,7 @@ def M2_dpmpyss_nac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -13380,7 +13434,7 @@ def M2_dpmpyss_rnd_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -13392,7 +13446,7 @@ def M2_dpmpyss_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -13402,7 +13456,7 @@ def M2_dpmpyuu_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111010;
@@ -13413,7 +13467,7 @@ def M2_dpmpyuu_nac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111011;
@@ -13424,7 +13478,7 @@ def M2_dpmpyuu_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -13434,7 +13488,7 @@ def M2_hmmpyh_rs1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13447,7 +13501,7 @@ def M2_hmmpyh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13460,7 +13514,7 @@ def M2_hmmpyl_rs1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -13473,7 +13527,7 @@ def M2_hmmpyl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -13486,7 +13540,7 @@ def M2_maci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyi($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889, ImmRegRel {
+tc_8cb685d9, TypeM>, Enc_2ae154, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -13501,7 +13555,7 @@ def M2_macsin : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rx32 -= mpyi($Rs32,#$Ii)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_11522288 {
+tc_a12a5971, TypeM>, Enc_c90aca {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100001100;
let hasNewValue = 1;
@@ -13519,7 +13573,7 @@ def M2_macsip : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rx32 += mpyi($Rs32,#$Ii)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_11522288, ImmRegRel {
+tc_a12a5971, TypeM>, Enc_c90aca, ImmRegRel {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100001000;
let hasNewValue = 1;
@@ -13538,7 +13592,7 @@ def M2_mmachs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -13550,7 +13604,7 @@ def M2_mmachs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -13562,7 +13616,7 @@ def M2_mmachs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -13574,7 +13628,7 @@ def M2_mmachs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywoh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -13586,7 +13640,7 @@ def M2_mmacls_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -13598,7 +13652,7 @@ def M2_mmacls_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -13610,7 +13664,7 @@ def M2_mmacls_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -13622,7 +13676,7 @@ def M2_mmacls_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -13634,7 +13688,7 @@ def M2_mmacuhs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -13646,7 +13700,7 @@ def M2_mmacuhs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -13658,7 +13712,7 @@ def M2_mmacuhs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -13670,7 +13724,7 @@ def M2_mmacuhs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpywouh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -13682,7 +13736,7 @@ def M2_mmaculs_rs0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -13694,7 +13748,7 @@ def M2_mmaculs_rs1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -13706,7 +13760,7 @@ def M2_mmaculs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -13718,7 +13772,7 @@ def M2_mmaculs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyweuh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -13730,7 +13784,7 @@ def M2_mmpyh_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -13741,7 +13795,7 @@ def M2_mmpyh_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -13752,7 +13806,7 @@ def M2_mmpyh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -13763,7 +13817,7 @@ def M2_mmpyh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywoh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -13774,7 +13828,7 @@ def M2_mmpyl_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -13785,7 +13839,7 @@ def M2_mmpyl_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -13796,7 +13850,7 @@ def M2_mmpyl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -13807,7 +13861,7 @@ def M2_mmpyl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -13818,7 +13872,7 @@ def M2_mmpyuh_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -13829,7 +13883,7 @@ def M2_mmpyuh_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -13840,7 +13894,7 @@ def M2_mmpyuh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -13851,7 +13905,7 @@ def M2_mmpyuh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpywouh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -13862,7 +13916,7 @@ def M2_mmpyul_rs0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -13873,7 +13927,7 @@ def M2_mmpyul_rs1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -13884,7 +13938,7 @@ def M2_mmpyul_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -13895,7 +13949,7 @@ def M2_mmpyul_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyweuh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -13906,7 +13960,7 @@ def M2_mpy_acc_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13919,7 +13973,7 @@ def M2_mpy_acc_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13932,7 +13986,7 @@ def M2_mpy_acc_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13945,7 +13999,7 @@ def M2_mpy_acc_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13958,7 +14012,7 @@ def M2_mpy_acc_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13971,7 +14025,7 @@ def M2_mpy_acc_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -13984,7 +14038,7 @@ def M2_mpy_acc_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -13997,7 +14051,7 @@ def M2_mpy_acc_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14010,7 +14064,7 @@ def M2_mpy_acc_sat_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14024,7 +14078,7 @@ def M2_mpy_acc_sat_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14038,7 +14092,7 @@ def M2_mpy_acc_sat_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14052,7 +14106,7 @@ def M2_mpy_acc_sat_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14066,7 +14120,7 @@ def M2_mpy_acc_sat_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14080,7 +14134,7 @@ def M2_mpy_acc_sat_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14094,7 +14148,7 @@ def M2_mpy_acc_sat_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110000;
@@ -14108,7 +14162,7 @@ def M2_mpy_acc_sat_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110100;
@@ -14122,7 +14176,7 @@ def M2_mpy_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14134,7 +14188,7 @@ def M2_mpy_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14146,7 +14200,7 @@ def M2_mpy_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14158,7 +14212,7 @@ def M2_mpy_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14170,7 +14224,7 @@ def M2_mpy_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14182,7 +14236,7 @@ def M2_mpy_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14194,7 +14248,7 @@ def M2_mpy_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14206,7 +14260,7 @@ def M2_mpy_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14218,7 +14272,7 @@ def M2_mpy_nac_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14231,7 +14285,7 @@ def M2_mpy_nac_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14244,7 +14298,7 @@ def M2_mpy_nac_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14257,7 +14311,7 @@ def M2_mpy_nac_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14270,7 +14324,7 @@ def M2_mpy_nac_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14283,7 +14337,7 @@ def M2_mpy_nac_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14296,7 +14350,7 @@ def M2_mpy_nac_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14309,7 +14363,7 @@ def M2_mpy_nac_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14322,7 +14376,7 @@ def M2_mpy_nac_sat_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14336,7 +14390,7 @@ def M2_mpy_nac_sat_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14350,7 +14404,7 @@ def M2_mpy_nac_sat_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14364,7 +14418,7 @@ def M2_mpy_nac_sat_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14378,7 +14432,7 @@ def M2_mpy_nac_sat_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14392,7 +14446,7 @@ def M2_mpy_nac_sat_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14406,7 +14460,7 @@ def M2_mpy_nac_sat_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110001;
@@ -14420,7 +14474,7 @@ def M2_mpy_nac_sat_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110101;
@@ -14434,7 +14488,7 @@ def M2_mpy_rnd_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14446,7 +14500,7 @@ def M2_mpy_rnd_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14458,7 +14512,7 @@ def M2_mpy_rnd_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14470,7 +14524,7 @@ def M2_mpy_rnd_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14482,7 +14536,7 @@ def M2_mpy_rnd_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14494,7 +14548,7 @@ def M2_mpy_rnd_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14506,7 +14560,7 @@ def M2_mpy_rnd_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14518,7 +14572,7 @@ def M2_mpy_rnd_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14530,7 +14584,7 @@ def M2_mpy_sat_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14543,7 +14597,7 @@ def M2_mpy_sat_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14556,7 +14610,7 @@ def M2_mpy_sat_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14569,7 +14623,7 @@ def M2_mpy_sat_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14582,7 +14636,7 @@ def M2_mpy_sat_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14595,7 +14649,7 @@ def M2_mpy_sat_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14608,7 +14662,7 @@ def M2_mpy_sat_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100000;
@@ -14621,7 +14675,7 @@ def M2_mpy_sat_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100100;
@@ -14634,7 +14688,7 @@ def M2_mpy_sat_rnd_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14647,7 +14701,7 @@ def M2_mpy_sat_rnd_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14660,7 +14714,7 @@ def M2_mpy_sat_rnd_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14673,7 +14727,7 @@ def M2_mpy_sat_rnd_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14686,7 +14740,7 @@ def M2_mpy_sat_rnd_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14699,7 +14753,7 @@ def M2_mpy_sat_rnd_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14712,7 +14766,7 @@ def M2_mpy_sat_rnd_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100001;
@@ -14725,7 +14779,7 @@ def M2_mpy_sat_rnd_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100101;
@@ -14738,7 +14792,7 @@ def M2_mpy_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101000;
@@ -14750,7 +14804,7 @@ def M2_mpy_up_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -14762,7 +14816,7 @@ def M2_mpy_up_s1_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101111;
@@ -14775,7 +14829,7 @@ def M2_mpyd_acc_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14786,7 +14840,7 @@ def M2_mpyd_acc_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14797,7 +14851,7 @@ def M2_mpyd_acc_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14808,7 +14862,7 @@ def M2_mpyd_acc_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14819,7 +14873,7 @@ def M2_mpyd_acc_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14830,7 +14884,7 @@ def M2_mpyd_acc_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14841,7 +14895,7 @@ def M2_mpyd_acc_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110000;
@@ -14852,7 +14906,7 @@ def M2_mpyd_acc_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110100;
@@ -14863,7 +14917,7 @@ def M2_mpyd_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14873,7 +14927,7 @@ def M2_mpyd_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14883,7 +14937,7 @@ def M2_mpyd_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14893,7 +14947,7 @@ def M2_mpyd_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14903,7 +14957,7 @@ def M2_mpyd_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14913,7 +14967,7 @@ def M2_mpyd_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14923,7 +14977,7 @@ def M2_mpyd_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100000;
@@ -14933,7 +14987,7 @@ def M2_mpyd_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100100;
@@ -14943,7 +14997,7 @@ def M2_mpyd_nac_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14954,7 +15008,7 @@ def M2_mpyd_nac_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -14965,7 +15019,7 @@ def M2_mpyd_nac_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14976,7 +15030,7 @@ def M2_mpyd_nac_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -14987,7 +15041,7 @@ def M2_mpyd_nac_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -14998,7 +15052,7 @@ def M2_mpyd_nac_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -15009,7 +15063,7 @@ def M2_mpyd_nac_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110001;
@@ -15020,7 +15074,7 @@ def M2_mpyd_nac_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpy($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110101;
@@ -15031,7 +15085,7 @@ def M2_mpyd_rnd_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15041,7 +15095,7 @@ def M2_mpyd_rnd_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15051,7 +15105,7 @@ def M2_mpyd_rnd_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15061,7 +15115,7 @@ def M2_mpyd_rnd_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.h,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15071,7 +15125,7 @@ def M2_mpyd_rnd_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15081,7 +15135,7 @@ def M2_mpyd_rnd_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.h):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15091,7 +15145,7 @@ def M2_mpyd_rnd_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100001;
@@ -15101,7 +15155,7 @@ def M2_mpyd_rnd_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpy($Rs32.l,$Rt32.l):<<1:rnd",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100101;
@@ -15111,7 +15165,7 @@ def M2_mpyi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyi($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773, ImmRegRel {
+tc_8c8041e6, TypeM>, Enc_5ab2be, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101000;
@@ -15125,7 +15179,7 @@ def M2_mpysin : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u8_0Imm:$Ii),
"$Rd32 = -mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, Enc_16355964 {
+tc_ae2c2dc2, TypeM>, Enc_b8c967 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100000100;
let hasNewValue = 1;
@@ -15136,7 +15190,7 @@ def M2_mpysip : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rd32 = +mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, Enc_16355964 {
+tc_ae2c2dc2, TypeM>, Enc_b8c967 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100000000;
let hasNewValue = 1;
@@ -15152,7 +15206,7 @@ def M2_mpysmi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, m32_0Imm:$Ii),
"$Rd32 = mpyi($Rs32,#$Ii)",
-M_tc_3x_SLOT23, TypeM>, ImmRegRel {
+tc_ae2c2dc2, TypeM>, ImmRegRel {
let hasNewValue = 1;
let opNewValue = 0;
let CextOpcode = "M2_mpyi";
@@ -15168,7 +15222,7 @@ def M2_mpysu_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpysu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101011;
@@ -15180,7 +15234,7 @@ def M2_mpyu_acc_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15193,7 +15247,7 @@ def M2_mpyu_acc_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15206,7 +15260,7 @@ def M2_mpyu_acc_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15219,7 +15273,7 @@ def M2_mpyu_acc_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15232,7 +15286,7 @@ def M2_mpyu_acc_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15245,7 +15299,7 @@ def M2_mpyu_acc_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15258,7 +15312,7 @@ def M2_mpyu_acc_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110010;
@@ -15271,7 +15325,7 @@ def M2_mpyu_acc_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110110;
@@ -15284,7 +15338,7 @@ def M2_mpyu_hh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15296,7 +15350,7 @@ def M2_mpyu_hh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15308,7 +15362,7 @@ def M2_mpyu_hl_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15320,7 +15374,7 @@ def M2_mpyu_hl_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15332,7 +15386,7 @@ def M2_mpyu_lh_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15344,7 +15398,7 @@ def M2_mpyu_lh_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15356,7 +15410,7 @@ def M2_mpyu_ll_s0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100010;
@@ -15368,7 +15422,7 @@ def M2_mpyu_ll_s1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101100110;
@@ -15380,7 +15434,7 @@ def M2_mpyu_nac_hh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15393,7 +15447,7 @@ def M2_mpyu_nac_hh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15406,7 +15460,7 @@ def M2_mpyu_nac_hl_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15419,7 +15473,7 @@ def M2_mpyu_nac_hl_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15432,7 +15486,7 @@ def M2_mpyu_nac_lh_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15445,7 +15499,7 @@ def M2_mpyu_nac_lh_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15458,7 +15512,7 @@ def M2_mpyu_nac_ll_s0 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110011;
@@ -15471,7 +15525,7 @@ def M2_mpyu_nac_ll_s1 : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101110111;
@@ -15484,7 +15538,7 @@ def M2_mpyu_up : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101010;
@@ -15496,7 +15550,7 @@ def M2_mpyud_acc_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15507,7 +15561,7 @@ def M2_mpyud_acc_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15518,7 +15572,7 @@ def M2_mpyud_acc_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15529,7 +15583,7 @@ def M2_mpyud_acc_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15540,7 +15594,7 @@ def M2_mpyud_acc_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15551,7 +15605,7 @@ def M2_mpyud_acc_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15562,7 +15616,7 @@ def M2_mpyud_acc_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110010;
@@ -15573,7 +15627,7 @@ def M2_mpyud_acc_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110110;
@@ -15584,7 +15638,7 @@ def M2_mpyud_hh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15594,7 +15648,7 @@ def M2_mpyud_hh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15604,7 +15658,7 @@ def M2_mpyud_hl_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15614,7 +15668,7 @@ def M2_mpyud_hl_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15624,7 +15678,7 @@ def M2_mpyud_lh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15634,7 +15688,7 @@ def M2_mpyud_lh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15644,7 +15698,7 @@ def M2_mpyud_ll_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100010;
@@ -15654,7 +15708,7 @@ def M2_mpyud_ll_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100100110;
@@ -15664,7 +15718,7 @@ def M2_mpyud_nac_hh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15675,7 +15729,7 @@ def M2_mpyud_nac_hh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15686,7 +15740,7 @@ def M2_mpyud_nac_hl_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15697,7 +15751,7 @@ def M2_mpyud_nac_hl_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.h,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15708,7 +15762,7 @@ def M2_mpyud_nac_lh_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.h)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15719,7 +15773,7 @@ def M2_mpyud_nac_lh_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.h):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15730,7 +15784,7 @@ def M2_mpyud_nac_ll_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.l)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110011;
@@ -15741,7 +15795,7 @@ def M2_mpyud_nac_ll_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 -= mpyu($Rs32.l,$Rt32.l):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100110111;
@@ -15752,7 +15806,7 @@ def M2_mpyui : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = mpyui($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -15762,7 +15816,7 @@ def M2_nacci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= add($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_c0cd91a8, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111100;
@@ -15776,7 +15830,7 @@ def M2_naccii : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 -= add($Rs32,#$Ii)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_11522288 {
+tc_c0cd91a8, TypeM>, Enc_c90aca {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100010100;
let hasNewValue = 1;
@@ -15794,7 +15848,7 @@ def M2_subacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rt32, IntRegs:$Rs32),
"$Rx32 += sub($Rt32,$Rs32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_7692963 {
+tc_c0cd91a8, TypeM>, Enc_a568d4 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111000;
@@ -15808,7 +15862,7 @@ def M2_vabsdiffh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffh($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333 {
+tc_63cd9d2d, TypeM>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -15818,7 +15872,7 @@ def M2_vabsdiffw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffw($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333 {
+tc_63cd9d2d, TypeM>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -15828,7 +15882,7 @@ def M2_vcmac_s0_sat_i : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vcmpyi($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -15840,7 +15894,7 @@ def M2_vcmac_s0_sat_r : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vcmpyr($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -15852,7 +15906,7 @@ def M2_vcmpy_s0_sat_i : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyi($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -15863,7 +15917,7 @@ def M2_vcmpy_s0_sat_r : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyr($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -15874,7 +15928,7 @@ def M2_vcmpy_s1_sat_i : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyi($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -15885,7 +15939,7 @@ def M2_vcmpy_s1_sat_r : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vcmpyr($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -15896,7 +15950,7 @@ def M2_vdmacs_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpy($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -15908,7 +15962,7 @@ def M2_vdmacs_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpy($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -15920,7 +15974,7 @@ def M2_vdmpyrs_s0 : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vdmpy($Rss32,$Rtt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001000;
@@ -15933,7 +15987,7 @@ def M2_vdmpyrs_s1 : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vdmpy($Rss32,$Rtt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001100;
@@ -15946,7 +16000,7 @@ def M2_vdmpys_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpy($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -15957,7 +16011,7 @@ def M2_vdmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpy($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -15968,7 +16022,7 @@ def M2_vmac2 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -15979,7 +16033,7 @@ def M2_vmac2es : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -15990,7 +16044,7 @@ def M2_vmac2es_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16002,7 +16056,7 @@ def M2_vmac2es_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vmpyeh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -16014,7 +16068,7 @@ def M2_vmac2s_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111000;
@@ -16026,7 +16080,7 @@ def M2_vmac2s_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyh($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -16038,7 +16092,7 @@ def M2_vmac2su_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyhsu($Rs32,$Rt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111011;
@@ -16050,7 +16104,7 @@ def M2_vmac2su_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpyhsu($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111111;
@@ -16062,7 +16116,7 @@ def M2_vmpy2es_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyeh($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16073,7 +16127,7 @@ def M2_vmpy2es_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vmpyeh($Rss32,$Rtt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -16084,7 +16138,7 @@ def M2_vmpy2s_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyh($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -16095,7 +16149,7 @@ def M2_vmpy2s_s0pack : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vmpyh($Rs32,$Rt32):rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101001;
@@ -16108,7 +16162,7 @@ def M2_vmpy2s_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyh($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16119,7 +16173,7 @@ def M2_vmpy2s_s1pack : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = vmpyh($Rs32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_14071773 {
+tc_8c8041e6, TypeM>, Enc_5ab2be {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101101101;
@@ -16132,7 +16186,7 @@ def M2_vmpy2su_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyhsu($Rs32,$Rt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101000;
@@ -16143,7 +16197,7 @@ def M2_vmpy2su_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpyhsu($Rs32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16154,7 +16208,7 @@ def M2_vraddh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vraddh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001001;
@@ -16166,7 +16220,7 @@ def M2_vradduh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vradduh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001000;
@@ -16178,7 +16232,7 @@ def M2_vrcmaci_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyi($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16189,7 +16243,7 @@ def M2_vrcmaci_s0c : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyi($Rss32,$Rtt32*)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010010;
@@ -16200,7 +16254,7 @@ def M2_vrcmacr_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyr($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16211,7 +16265,7 @@ def M2_vrcmacr_s0c : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpyr($Rss32,$Rtt32*)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -16222,7 +16276,7 @@ def M2_vrcmpyi_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyi($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16232,7 +16286,7 @@ def M2_vrcmpyi_s0c : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyi($Rss32,$Rtt32*)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -16242,7 +16296,7 @@ def M2_vrcmpyr_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyr($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16252,7 +16306,7 @@ def M2_vrcmpyr_s0c : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpyr($Rss32,$Rtt32*)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000011;
@@ -16262,7 +16316,7 @@ def M2_vrcmpys_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += vrcmpys($Rss32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8cb685d9, TypeM> {
let isPseudo = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
@@ -16270,7 +16324,7 @@ def M2_vrcmpys_acc_s1_h : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpys($Rss32,$Rtt32):<<1:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -16282,7 +16336,7 @@ def M2_vrcmpys_acc_s1_l : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrcmpys($Rss32,$Rtt32):<<1:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -16294,14 +16348,14 @@ def M2_vrcmpys_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vrcmpys($Rss32,$Rt32):<<1:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let isPseudo = 1;
}
def M2_vrcmpys_s1_h : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpys($Rss32,$Rtt32):<<1:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16312,7 +16366,7 @@ def M2_vrcmpys_s1_l : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrcmpys($Rss32,$Rtt32):<<1:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -16323,7 +16377,7 @@ def M2_vrcmpys_s1rp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = vrcmpys($Rss32,$Rt32):<<1:rnd:sat",
-M_tc_3x_SLOT23, TypeM> {
+tc_8c8041e6, TypeM> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -16332,7 +16386,7 @@ def M2_vrcmpys_s1rp_h : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vrcmpys($Rss32,$Rtt32):<<1:rnd:sat:raw:hi",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001101;
@@ -16345,7 +16399,7 @@ def M2_vrcmpys_s1rp_l : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = vrcmpys($Rss32,$Rtt32):<<1:rnd:sat:raw:lo",
-M_tc_3x_SLOT23, TypeM>, Enc_9277990 {
+tc_8c8041e6, TypeM>, Enc_d2216a {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101001101;
@@ -16358,7 +16412,7 @@ def M2_vrmac_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010000;
@@ -16369,7 +16423,7 @@ def M2_vrmpy_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000000;
@@ -16379,7 +16433,7 @@ def M2_xor_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111100;
@@ -16393,7 +16447,7 @@ def M4_and_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16407,7 +16461,7 @@ def M4_and_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16421,7 +16475,7 @@ def M4_and_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16435,7 +16489,7 @@ def M4_and_xor : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16449,7 +16503,7 @@ def M4_cmpyi_wh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyiwh($Rss32,$Rt32):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16462,7 +16516,7 @@ def M4_cmpyi_whc : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyiwh($Rss32,$Rt32*):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645, Requires<[HasV5T]> {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16475,7 +16529,7 @@ def M4_cmpyr_wh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyrwh($Rss32,$Rt32):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16488,7 +16542,7 @@ def M4_cmpyr_whc : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = cmpyrwh($Rss32,$Rt32*):<<1:rnd:sat",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_14287645, Requires<[HasV5T]> {
+tc_8c8041e6, TypeS_3op>, Enc_3d5b28, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
@@ -16501,7 +16555,7 @@ def M4_mac_up_s1_sat : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
@@ -16516,7 +16570,7 @@ def M4_mpyri_addi : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii, IntRegs:$Rs32, u6_0Imm:$II),
"$Rd32 = add(#$Ii,mpyi($Rs32,#$II))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_971574, ImmRegRel {
+tc_a12a5971, TypeALU64>, Enc_322e1b, ImmRegRel {
let Inst{31-24} = 0b11011000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16532,7 +16586,7 @@ def M4_mpyri_addr : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Ru32, IntRegs:$Rs32, u32_0Imm:$Ii),
"$Rd32 = add($Ru32,mpyi($Rs32,#$Ii))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_236434, ImmRegRel {
+tc_a12a5971, TypeALU64>, Enc_420cf3, ImmRegRel {
let Inst{31-23} = 0b110111111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16549,7 +16603,7 @@ def M4_mpyri_addr_u2 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Ru32, u6_2Imm:$Ii, IntRegs:$Rs32),
"$Rd32 = add($Ru32,mpyi(#$Ii,$Rs32))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_9959498 {
+tc_69bb508b, TypeALU64>, Enc_277737 {
let Inst{31-23} = 0b110111110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16559,7 +16613,7 @@ def M4_mpyrr_addi : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add(#$Ii,mpyi($Rs32,$Rt32))",
-ALU64_tc_3x_SLOT23, TypeALU64>, Enc_2216485, ImmRegRel {
+tc_8cb685d9, TypeALU64>, Enc_a7b8e8, ImmRegRel {
let Inst{31-23} = 0b110101110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -16576,7 +16630,7 @@ def M4_mpyrr_addr : HInst<
(outs IntRegs:$Ry32),
(ins IntRegs:$Ru32, IntRegs:$Ry32in, IntRegs:$Rs32),
"$Ry32 = add($Ru32,mpyi($Ry32in,$Rs32))",
-M_tc_3x_SLOT23, TypeM>, Enc_13770697, ImmRegRel {
+tc_8cb685d9, TypeM>, Enc_7f1a05, ImmRegRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100011000;
@@ -16591,7 +16645,7 @@ def M4_nac_up_s1_sat : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= mpy($Rs32,$Rt32):<<1:sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_8cb685d9, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111011;
@@ -16606,7 +16660,7 @@ def M4_or_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111010;
@@ -16620,7 +16674,7 @@ def M4_or_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16634,7 +16688,7 @@ def M4_or_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16648,7 +16702,7 @@ def M4_or_xor : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= xor($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16662,7 +16716,7 @@ def M4_pmpyw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = pmpyw($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -16672,7 +16726,7 @@ def M4_pmpyw_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 ^= pmpyw($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111001;
@@ -16683,7 +16737,7 @@ def M4_vpmpyh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vpmpyh($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101110;
@@ -16693,7 +16747,7 @@ def M4_vpmpyh_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 ^= vpmpyh($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111101;
@@ -16704,7 +16758,7 @@ def M4_vrmpyeh_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyweh($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -16715,7 +16769,7 @@ def M4_vrmpyeh_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpyweh($Rss32,$Rtt32):<<1",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010101;
@@ -16726,7 +16780,7 @@ def M4_vrmpyeh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyweh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000010;
@@ -16736,7 +16790,7 @@ def M4_vrmpyeh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpyweh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -16746,7 +16800,7 @@ def M4_vrmpyoh_acc_s0 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpywoh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010011;
@@ -16757,7 +16811,7 @@ def M4_vrmpyoh_acc_s1 : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpywoh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010111;
@@ -16768,7 +16822,7 @@ def M4_vrmpyoh_s0 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpywoh($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000001;
@@ -16778,7 +16832,7 @@ def M4_vrmpyoh_s1 : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpywoh($Rss32,$Rtt32):<<1",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16788,7 +16842,7 @@ def M4_xor_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= and($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16802,7 +16856,7 @@ def M4_xor_andn : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= and($Rs32,~$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111001;
@@ -16816,7 +16870,7 @@ def M4_xor_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 ^= or($Rs32,$Rt32)",
-M_tc_2_acc_SLOT23, TypeM>, Enc_9223889 {
+tc_3c10f809, TypeM>, Enc_2ae154 {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101111110;
@@ -16830,7 +16884,7 @@ def M4_xor_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 ^= xor($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_12702821 {
+tc_3c10f809, TypeS_3op>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001010100;
@@ -16841,7 +16895,7 @@ def M5_vdmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vdmpybsu($Rss32,$Rtt32):sat",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821, Requires<[HasV5T]> {
+tc_8cb685d9, TypeM>, Enc_88c16c, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010001;
@@ -16853,7 +16907,7 @@ def M5_vdmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vdmpybsu($Rss32,$Rtt32):sat",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157, Requires<[HasV5T]> {
+tc_8c8041e6, TypeM>, Enc_a56825, Requires<[HasV5T]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16864,7 +16918,7 @@ def M5_vmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpybsu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111110;
@@ -16875,7 +16929,7 @@ def M5_vmacbuu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rxx32 += vmpybu($Rs32,$Rt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_1409050 {
+tc_8cb685d9, TypeM>, Enc_61f0b0 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100111100;
@@ -16886,7 +16940,7 @@ def M5_vmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpybsu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101010;
@@ -16896,7 +16950,7 @@ def M5_vmpybuu : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = vmpybu($Rs32,$Rt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_1997594 {
+tc_8c8041e6, TypeM>, Enc_be32a5 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11100101100;
@@ -16906,7 +16960,7 @@ def M5_vrmacbsu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpybsu($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010110;
@@ -16917,7 +16971,7 @@ def M5_vrmacbuu : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 += vrmpybu($Rss32,$Rtt32)",
-M_tc_3x_acc_SLOT23, TypeM>, Enc_12702821 {
+tc_8cb685d9, TypeM>, Enc_88c16c {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101010100;
@@ -16928,7 +16982,7 @@ def M5_vrmpybsu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpybsu($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000110;
@@ -16938,7 +16992,7 @@ def M5_vrmpybuu : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vrmpybu($Rss32,$Rtt32)",
-M_tc_3x_SLOT23, TypeM>, Enc_8333157 {
+tc_8c8041e6, TypeM>, Enc_a56825 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000100;
@@ -16948,7 +17002,7 @@ def M6_vabsdiffb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffb($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> {
+tc_faab1248, TypeM>, Enc_ea23e4, Requires<[HasV62T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000111;
@@ -16958,7 +17012,7 @@ def M6_vabsdiffub : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = vabsdiffub($Rtt32,$Rss32)",
-M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> {
+tc_faab1248, TypeM>, Enc_ea23e4, Requires<[HasV62T]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11101000101;
@@ -16968,15 +17022,15 @@ def PS_loadrbabs : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memb(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrb";
let BaseOpcode = "L4_loadrb_abs";
let isPredicable = 1;
@@ -16991,13 +17045,13 @@ def PS_loadrdabs : HInst<
(outs DoubleRegs:$Rdd32),
(ins u29_3Imm:$Ii),
"$Rdd32 = memd(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_4975051, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_509701, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrd";
let BaseOpcode = "L4_loadrd_abs";
let isPredicable = 1;
@@ -17012,15 +17066,15 @@ def PS_loadrhabs : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memh(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrh";
let BaseOpcode = "L4_loadrh_abs";
let isPredicable = 1;
@@ -17035,15 +17089,15 @@ def PS_loadriabs : HInst<
(outs IntRegs:$Rd32),
(ins u30_2Imm:$Ii),
"$Rd32 = memw(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_8814718, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_4f4ed7, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadri";
let BaseOpcode = "L4_loadri_abs";
let isPredicable = 1;
@@ -17058,15 +17112,15 @@ def PS_loadrubabs : HInst<
(outs IntRegs:$Rd32),
(ins u32_0Imm:$Ii),
"$Rd32 = memub(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_1886960, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_25bef0, AddrModeRel {
let Inst{24-21} = 0b1001;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadrub";
let BaseOpcode = "L4_loadrub_abs";
let isPredicable = 1;
@@ -17081,15 +17135,15 @@ def PS_loadruhabs : HInst<
(outs IntRegs:$Rd32),
(ins u31_1Imm:$Ii),
"$Rd32 = memuh(#$Ii)",
-V2LDST_tc_ld_SLOT01, TypeV2LDST>, Enc_12608570, AddrModeRel {
+tc_70cabf66, TypeV2LDST>, Enc_8df4be, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b01001;
let hasNewValue = 1;
let opNewValue = 0;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let mayLoad = 1;
+let isExtended = 1;
let CextOpcode = "L2_loadruh";
let BaseOpcode = "L4_loadruh_abs";
let isPredicable = 1;
@@ -17104,7 +17158,7 @@ def PS_storerbabs : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Rt32),
"memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_12395768, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_1b64fb, AddrModeRel {
let Inst{24-21} = 0b0000;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17126,16 +17180,16 @@ def PS_storerbnewabs : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Nt8),
"memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_4050532, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_ad1831, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let isPredicable = 1;
@@ -17151,7 +17205,7 @@ def PS_storerdabs : HInst<
(outs),
(ins u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_11682941, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_5c124a, AddrModeRel {
let Inst{24-21} = 0b0110;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17172,7 +17226,7 @@ def PS_storerfabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17193,7 +17247,7 @@ def PS_storerhabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17215,16 +17269,16 @@ def PS_storerhnewabs : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Nt8),
"memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_13618890, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_bc03e5, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let isPredicable = 1;
@@ -17240,7 +17294,7 @@ def PS_storeriabs : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Rt32),
"memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeV2LDST>, Enc_15999208, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_541f26, AddrModeRel {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
@@ -17262,16 +17316,16 @@ def PS_storerinewabs : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Nt8),
"memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeV2LDST>, Enc_12297800, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_78cbf0, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let isPredicable = 1;
@@ -17287,7 +17341,7 @@ def S2_addasl_rrri : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32, u3_0Imm:$Ii),
"$Rd32 = addasl($Rt32,$Rs32,#$Ii)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_3494181 {
+tc_090485bb, TypeS_3op>, Enc_47ef61 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000100000;
let hasNewValue = 1;
@@ -17298,7 +17352,7 @@ def S2_allocframe : HInst<
(outs),
(ins u11_3Imm:$Ii),
"allocframe(#$Ii)",
-ST_tc_ld_SLOT0, TypeST>, Enc_15830826 {
+tc_0cb867f2, TypeST>, Enc_22c845 {
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10100000100;
let Inst{20-16} = 0b11101;
@@ -17312,7 +17366,7 @@ def S2_asl_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asl($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000000000;
}
@@ -17320,7 +17374,7 @@ def S2_asl_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17330,7 +17384,7 @@ def S2_asl_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17340,7 +17394,7 @@ def S2_asl_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17350,7 +17404,7 @@ def S2_asl_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17360,7 +17414,7 @@ def S2_asl_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= asl($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -17370,7 +17424,7 @@ def S2_asl_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asl($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -17381,7 +17435,7 @@ def S2_asl_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17394,7 +17448,7 @@ def S2_asl_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17407,7 +17461,7 @@ def S2_asl_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17420,7 +17474,7 @@ def S2_asl_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17433,19 +17487,20 @@ def S2_asl_i_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asl($Rs32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_47ab9233, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asl_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= asl($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -17458,7 +17513,7 @@ def S2_asl_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vaslh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b010;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -17467,7 +17522,7 @@ def S2_asl_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vaslw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -17476,7 +17531,7 @@ def S2_asl_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = asl($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -17485,7 +17540,7 @@ def S2_asl_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -17496,7 +17551,7 @@ def S2_asl_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -17507,7 +17562,7 @@ def S2_asl_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -17518,7 +17573,7 @@ def S2_asl_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -17529,7 +17584,7 @@ def S2_asl_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= asl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -17540,7 +17595,7 @@ def S2_asl_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asl($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -17551,7 +17606,7 @@ def S2_asl_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -17564,7 +17619,7 @@ def S2_asl_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -17577,7 +17632,7 @@ def S2_asl_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -17590,7 +17645,7 @@ def S2_asl_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= asl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -17603,19 +17658,20 @@ def S2_asl_r_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asl($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_47ab9233, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asl_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vaslh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -17624,7 +17680,7 @@ def S2_asl_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vaslw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -17633,7 +17689,7 @@ def S2_asr_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asr($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000000000;
}
@@ -17641,7 +17697,7 @@ def S2_asr_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17651,7 +17707,7 @@ def S2_asr_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17661,7 +17717,7 @@ def S2_asr_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -17671,7 +17727,7 @@ def S2_asr_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= asr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -17681,7 +17737,7 @@ def S2_asr_i_p_rnd : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asr($Rss32,#$Ii):rnd",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_5eac98, Requires<[HasV5T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000000110;
let prefersSlot3 = 1;
@@ -17690,14 +17746,14 @@ def S2_asr_i_p_rnd_goodsyntax : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = asrrnd($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let isPseudo = 1;
}
def S2_asr_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asr($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -17708,7 +17764,7 @@ def S2_asr_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17721,7 +17777,7 @@ def S2_asr_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17734,7 +17790,7 @@ def S2_asr_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -17747,7 +17803,7 @@ def S2_asr_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= asr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -17760,7 +17816,7 @@ def S2_asr_i_r_rnd : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asr($Rs32,#$Ii):rnd",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_63cd9d2d, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100010;
@@ -17772,7 +17828,7 @@ def S2_asr_i_r_rnd_goodsyntax : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = asrrnd($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op> {
+tc_63cd9d2d, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -17781,18 +17837,19 @@ def S2_asr_i_svw_trun : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rd32 = vasrw($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2380082 {
+tc_7ca2ea10, TypeS_2op>, Enc_8dec2e {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001000110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_asr_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -17801,7 +17858,7 @@ def S2_asr_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vasrw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -17810,7 +17867,7 @@ def S2_asr_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = asr($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -17819,7 +17876,7 @@ def S2_asr_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -17830,7 +17887,7 @@ def S2_asr_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -17841,7 +17898,7 @@ def S2_asr_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -17852,7 +17909,7 @@ def S2_asr_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -17863,7 +17920,7 @@ def S2_asr_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= asr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -17874,7 +17931,7 @@ def S2_asr_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asr($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -17885,7 +17942,7 @@ def S2_asr_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -17898,7 +17955,7 @@ def S2_asr_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -17911,7 +17968,7 @@ def S2_asr_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -17924,7 +17981,7 @@ def S2_asr_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= asr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -17937,30 +17994,32 @@ def S2_asr_r_r_sat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = asr($Rs32,$Rt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_47ab9233, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_asr_r_svw_trun : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rd32 = vasrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14287645 {
+tc_7ca2ea10, TypeS_3op>, Enc_3d5b28 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000101000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_asr_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vasrh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -17969,7 +18028,7 @@ def S2_asr_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vasrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -17978,25 +18037,27 @@ def S2_brev : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = brev($Rs32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_brevp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = brev($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_cabacdecbin : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = decbin($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_5d806107, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -18008,77 +18069,84 @@ def S2_cl0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = cl0($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl0p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = cl0($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = cl1($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_cl1p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = cl1($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = clb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clbnorm : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = normamt($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clbp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = clb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_clrbit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = clrbit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -18089,7 +18157,7 @@ def S2_clrbit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = clrbit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -18100,55 +18168,60 @@ def S2_ct0 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = ct0($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct0p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = ct0($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct1 : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = ct1($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_ab1b5e74, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_ct1p : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = ct1($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000111;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_deinterleave : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = deinterleave($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_extractu : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rd32 = extractu($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_11930928 {
+tc_c0cd91a8, TypeS_2op>, Enc_b388cf {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011010;
let hasNewValue = 1;
@@ -18159,7 +18232,7 @@ def S2_extractu_rp : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rd32 = extractu($Rs32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_15472748 {
+tc_87601822, TypeS_3op>, Enc_e07374 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001001000;
@@ -18171,7 +18244,7 @@ def S2_extractup : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rdd32 = extractu($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_9894557 {
+tc_c0cd91a8, TypeS_2op>, Enc_b84c4c {
let Inst{31-24} = 0b10000001;
let prefersSlot3 = 1;
}
@@ -18179,7 +18252,7 @@ def S2_extractup_rp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = extractu($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -18189,56 +18262,61 @@ def S2_insert : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = insert($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2880796 {
+tc_d95f4e98, TypeS_2op>, Enc_a1e29d {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011110;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Constraints = "$Rx32 = $Rx32in";
}
def S2_insert_rp : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rx32 = insert($Rs32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_16311032 {
+tc_3c10f809, TypeS_3op>, Enc_179b35 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001000000;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Constraints = "$Rx32 = $Rx32in";
}
def S2_insertp : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rxx32 = insert($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_631197 {
+tc_d95f4e98, TypeS_2op>, Enc_143a3c {
let Inst{31-24} = 0b10000011;
+let prefersSlot3 = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
def S2_insertp_rp : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rxx32 = insert($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_12702821 {
+tc_3c10f809, TypeS_3op>, Enc_88c16c {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001010000;
+let prefersSlot3 = 1;
let Constraints = "$Rxx32 = $Rxx32in";
}
def S2_interleave : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = interleave($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_ab1b5e74, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000110;
+let prefersSlot3 = 1;
}
def S2_lfsp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = lfs($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -18248,7 +18326,7 @@ def S2_lsl_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = lsl($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -18257,7 +18335,7 @@ def S2_lsl_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -18268,7 +18346,7 @@ def S2_lsl_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -18279,7 +18357,7 @@ def S2_lsl_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -18290,7 +18368,7 @@ def S2_lsl_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -18301,7 +18379,7 @@ def S2_lsl_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= lsl($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -18312,7 +18390,7 @@ def S2_lsl_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = lsl($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -18323,7 +18401,7 @@ def S2_lsl_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -18336,7 +18414,7 @@ def S2_lsl_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -18349,7 +18427,7 @@ def S2_lsl_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -18362,7 +18440,7 @@ def S2_lsl_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= lsl($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -18375,7 +18453,7 @@ def S2_lsl_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlslh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -18384,7 +18462,7 @@ def S2_lsl_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlslw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -18393,7 +18471,7 @@ def S2_lsr_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = lsr($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995 {
+tc_9c18c9a5, TypeS_2op>, Enc_5eac98 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000000000;
}
@@ -18401,7 +18479,7 @@ def S2_lsr_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -18411,7 +18489,7 @@ def S2_lsr_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -18421,7 +18499,7 @@ def S2_lsr_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_c0cd91a8, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -18431,7 +18509,7 @@ def S2_lsr_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -18441,7 +18519,7 @@ def S2_lsr_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= lsr($Rss32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8497723 {
+tc_3c10f809, TypeS_2op>, Enc_70fb07 {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -18451,7 +18529,7 @@ def S2_lsr_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = lsr($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -18462,7 +18540,7 @@ def S2_lsr_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -18475,7 +18553,7 @@ def S2_lsr_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -18488,7 +18566,7 @@ def S2_lsr_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_c0cd91a8, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -18501,7 +18579,7 @@ def S2_lsr_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -18514,7 +18592,7 @@ def S2_lsr_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= lsr($Rs32,#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_2410156 {
+tc_3c10f809, TypeS_2op>, Enc_28a2dc {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -18527,7 +18605,7 @@ def S2_lsr_i_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vlsrh($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775 {
+tc_9c18c9a5, TypeS_2op>, Enc_12b6e9 {
let Inst{7-5} = 0b001;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000100;
@@ -18536,7 +18614,7 @@ def S2_lsr_i_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u5_0Imm:$Ii),
"$Rdd32 = vlsrw($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13201267 {
+tc_9c18c9a5, TypeS_2op>, Enc_7e5a82 {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000000010;
@@ -18545,7 +18623,7 @@ def S2_lsr_r_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = lsr($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011100;
@@ -18554,7 +18632,7 @@ def S2_lsr_r_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011110;
@@ -18565,7 +18643,7 @@ def S2_lsr_r_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 &= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011010;
@@ -18576,7 +18654,7 @@ def S2_lsr_r_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 -= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_c0cd91a8, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011100;
@@ -18587,7 +18665,7 @@ def S2_lsr_r_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 |= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011000;
@@ -18598,7 +18676,7 @@ def S2_lsr_r_p_xor : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 ^= lsr($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_3c10f809, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001011011;
@@ -18609,7 +18687,7 @@ def S2_lsr_r_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = lsr($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110010;
@@ -18620,7 +18698,7 @@ def S2_lsr_r_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 += lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100110;
@@ -18633,7 +18711,7 @@ def S2_lsr_r_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 &= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100010;
@@ -18646,7 +18724,7 @@ def S2_lsr_r_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 -= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_c0cd91a8, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100100;
@@ -18659,7 +18737,7 @@ def S2_lsr_r_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, IntRegs:$Rt32),
"$Rx32 |= lsr($Rs32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_9223889 {
+tc_3c10f809, TypeS_3op>, Enc_2ae154 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001100000;
@@ -18672,7 +18750,7 @@ def S2_lsr_r_vh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlsrh($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011010;
@@ -18681,7 +18759,7 @@ def S2_lsr_r_vw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vlsrw($Rss32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_9c18c9a5, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011000;
@@ -18690,7 +18768,7 @@ def S2_packhl : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = packhl($Rs32,$Rt32)",
-ALU32_3op_tc_1_SLOT0123, TypeALU32_3op>, Enc_1997594 {
+tc_548f402d, TypeALU32_3op>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11110101100;
@@ -18700,7 +18778,7 @@ def S2_parityp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rd32 = parity($Rss32,$Rtt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_9277990 {
+tc_87601822, TypeALU64>, Enc_d2216a {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010000000;
@@ -18712,7 +18790,7 @@ def S2_pstorerbf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100000;
let isPredicated = 1;
@@ -18734,7 +18812,7 @@ def S2_pstorerbf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -18752,7 +18830,7 @@ def S2_pstorerbf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -18760,7 +18838,7 @@ def S2_pstorerbfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -18779,7 +18857,7 @@ def S2_pstorerbnewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000100101;
@@ -18788,8 +18866,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -18804,7 +18882,7 @@ def S2_pstorerbnewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b100;
@@ -18814,8 +18892,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18825,7 +18903,7 @@ def S2_pstorerbnewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -18834,7 +18912,7 @@ def S2_pstorerbnewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -18845,8 +18923,8 @@ let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18856,7 +18934,7 @@ def S2_pstorerbnewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000000101;
@@ -18864,8 +18942,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -18880,7 +18958,7 @@ def S2_pstorerbnewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b100;
@@ -18889,8 +18967,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18900,7 +18978,7 @@ def S2_pstorerbnewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -18909,7 +18987,7 @@ def S2_pstorerbnewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_2813446, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_52a5dd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -18919,8 +18997,8 @@ let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerb_pi";
let opNewValue = 4;
@@ -18930,7 +19008,7 @@ def S2_pstorerbt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000000;
let isPredicated = 1;
@@ -18951,7 +19029,7 @@ def S2_pstorerbt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -18968,7 +19046,7 @@ def S2_pstorerbt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -18976,7 +19054,7 @@ def S2_pstorerbtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_8065534, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_cc449f, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -18994,7 +19072,7 @@ def S2_pstorerdf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100110;
let isPredicated = 1;
@@ -19015,7 +19093,7 @@ def S2_pstorerdf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19033,7 +19111,7 @@ def S2_pstorerdf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19041,7 +19119,7 @@ def S2_pstorerdfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19060,7 +19138,7 @@ def S2_pstorerdt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000110;
let isPredicated = 1;
@@ -19080,7 +19158,7 @@ def S2_pstorerdt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19097,7 +19175,7 @@ def S2_pstorerdt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19105,7 +19183,7 @@ def S2_pstorerdtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11959851, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_9a33d5, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19123,7 +19201,7 @@ def S2_pstorerff_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100011;
let isPredicated = 1;
@@ -19144,7 +19222,7 @@ def S2_pstorerff_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19162,7 +19240,7 @@ def S2_pstorerff_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19170,7 +19248,7 @@ def S2_pstorerffnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19189,7 +19267,7 @@ def S2_pstorerft_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000011;
let isPredicated = 1;
@@ -19209,7 +19287,7 @@ def S2_pstorerft_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19226,7 +19304,7 @@ def S2_pstorerft_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19234,7 +19312,7 @@ def S2_pstorerftnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19252,7 +19330,7 @@ def S2_pstorerhf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100010;
let isPredicated = 1;
@@ -19274,7 +19352,7 @@ def S2_pstorerhf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19292,7 +19370,7 @@ def S2_pstorerhf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19300,7 +19378,7 @@ def S2_pstorerhfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19319,7 +19397,7 @@ def S2_pstorerhnewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000100101;
@@ -19328,8 +19406,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -19344,7 +19422,7 @@ def S2_pstorerhnewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b101;
@@ -19354,8 +19432,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19365,7 +19443,7 @@ def S2_pstorerhnewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19374,7 +19452,7 @@ def S2_pstorerhnewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -19385,8 +19463,8 @@ let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19396,7 +19474,7 @@ def S2_pstorerhnewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000000101;
@@ -19404,8 +19482,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -19420,7 +19498,7 @@ def S2_pstorerhnewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b101;
@@ -19429,8 +19507,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19440,7 +19518,7 @@ def S2_pstorerhnewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19449,7 +19527,7 @@ def S2_pstorerhnewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_3813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_31aa6a, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -19459,8 +19537,8 @@ let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_pi";
let opNewValue = 4;
@@ -19470,7 +19548,7 @@ def S2_pstorerht_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000010;
let isPredicated = 1;
@@ -19491,7 +19569,7 @@ def S2_pstorerht_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19508,7 +19586,7 @@ def S2_pstorerht_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19516,7 +19594,7 @@ def S2_pstorerhtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_b886fd, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19534,7 +19612,7 @@ def S2_pstorerif_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000100100;
let isPredicated = 1;
@@ -19556,7 +19634,7 @@ def S2_pstorerif_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19574,7 +19652,7 @@ def S2_pstorerif_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19582,7 +19660,7 @@ def S2_pstorerifnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19602,7 +19680,7 @@ def S2_pstorerinewf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000100101;
@@ -19611,8 +19689,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -19627,7 +19705,7 @@ def S2_pstorerinewf_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b110;
@@ -19637,8 +19715,8 @@ let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19648,7 +19726,7 @@ def S2_pstorerinewf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19657,7 +19735,7 @@ def S2_pstorerinewfnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -19668,8 +19746,8 @@ let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19679,7 +19757,7 @@ def S2_pstorerinewt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_9da3628f, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000000101;
@@ -19687,8 +19765,8 @@ let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -19703,7 +19781,7 @@ def S2_pstorerinewt_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_e2480a7f, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b110;
@@ -19712,8 +19790,8 @@ let isPredicated = 1;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19723,7 +19801,7 @@ def S2_pstorerinewt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_9da3628f, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -19732,7 +19810,7 @@ def S2_pstorerinewtnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_4813442, AddrModeRel {
+tc_8fab9ac3, TypeST>, Enc_65f095, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -19742,8 +19820,8 @@ let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_pi";
let opNewValue = 4;
@@ -19753,7 +19831,7 @@ def S2_pstorerit_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_3d905451, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000000100;
let isPredicated = 1;
@@ -19774,7 +19852,7 @@ def S2_pstorerit_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_9b73d261, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
@@ -19791,7 +19869,7 @@ def S2_pstorerit_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_3d905451, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19799,7 +19877,7 @@ def S2_pstoreritnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10065510, AddrModeRel {
+tc_7675c0e9, TypeST>, Enc_7eaeb6, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -19817,7 +19895,7 @@ def S2_setbit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = setbit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -19828,7 +19906,7 @@ def S2_setbit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = setbit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -19839,7 +19917,7 @@ def S2_shuffeb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = shuffeb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19848,7 +19926,7 @@ def S2_shuffeh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = shuffeh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19857,7 +19935,7 @@ def S2_shuffob : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = shuffob($Rtt32,$Rss32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11687333 {
+tc_9c18c9a5, TypeS_3op>, Enc_ea23e4 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001000;
@@ -19866,7 +19944,7 @@ def S2_shuffoh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32),
"$Rdd32 = shuffoh($Rtt32,$Rss32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11687333 {
+tc_9c18c9a5, TypeS_3op>, Enc_ea23e4 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -19875,7 +19953,7 @@ def S2_storerb_io : HInst<
(outs),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13150110, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_448f7f, AddrModeRel {
let Inst{24-21} = 0b1000;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -19896,7 +19974,7 @@ def S2_storerb_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111000;
let accessSize = ByteAccess;
@@ -19909,7 +19987,7 @@ def S2_storerb_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_3915770 {
+tc_251c87b2, TypeST>, Enc_b15941 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001000;
@@ -19924,7 +20002,7 @@ def S2_storerb_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001000;
let addrMode = PostInc;
@@ -19938,7 +20016,7 @@ def S2_storerb_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_12492533, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_10bc21, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -19955,7 +20033,7 @@ def S2_storerb_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memb($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101000;
let addrMode = PostInc;
@@ -19968,7 +20046,7 @@ def S2_storerb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -19976,7 +20054,7 @@ def S2_storerbgp : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Rt32),
"memb(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_12395768, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_1b64fb, AddrModeRel {
let Inst{24-21} = 0b0000;
let Inst{31-27} = 0b01001;
let accessSize = ByteAccess;
@@ -19994,15 +20072,15 @@ def S2_storerbnew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10002182, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_4df4e9, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -20018,14 +20096,14 @@ def S2_storerbnew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101111101;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerb_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20034,7 +20112,7 @@ def S2_storerbnew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_5326450 {
+tc_9c68db63, TypeST>, Enc_96ce4f {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b00;
@@ -20042,8 +20120,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20052,15 +20130,15 @@ def S2_storerbnew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20069,7 +20147,7 @@ def S2_storerbnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_5900401, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_c7cd90, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b000;
@@ -20077,8 +20155,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerb_pi";
let isPredicable = 1;
let isNVStorable = 1;
@@ -20089,15 +20167,15 @@ def S2_storerbnew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memb($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20105,7 +20183,7 @@ def S2_storerbnew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20114,14 +20192,14 @@ def S2_storerbnewgp : HInst<
(outs),
(ins u32_0Imm:$Ii, IntRegs:$Nt8),
"memb(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_4050532, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_ad1831, AddrModeRel {
let Inst{12-11} = 0b00;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storerbabs";
let isPredicable = 1;
@@ -20135,7 +20213,7 @@ def S2_storerd_io : HInst<
(outs),
(ins IntRegs:$Rs32, s29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rs32+#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16319737, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_ce6828, AddrModeRel {
let Inst{24-21} = 0b1110;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20155,7 +20233,7 @@ def S2_storerd_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++$Mu2:brev) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111110;
let accessSize = DoubleWordAccess;
@@ -20166,7 +20244,7 @@ def S2_storerd_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++#$Ii:circ($Mu2)) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_4501395 {
+tc_251c87b2, TypeST>, Enc_395cc4 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001110;
@@ -20180,7 +20258,7 @@ def S2_storerd_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++I:circ($Mu2)) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001110;
let addrMode = PostInc;
@@ -20193,7 +20271,7 @@ def S2_storerd_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rx32++#$Ii) = $Rtt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11271630, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_85bf58, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20210,7 +20288,7 @@ def S2_storerd_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, DoubleRegs:$Rtt32),
"memd($Rx32++$Mu2) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_15816255 {
+tc_20a8e109, TypeST>, Enc_928ca1 {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101110;
let addrMode = PostInc;
@@ -20222,7 +20300,7 @@ def S2_storerd_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20230,7 +20308,7 @@ def S2_storerdgp : HInst<
(outs),
(ins u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"memd(gp+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11682941, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_5c124a, AddrModeRel {
let Inst{24-21} = 0b0110;
let Inst{31-27} = 0b01001;
let accessSize = DoubleWordAccess;
@@ -20247,7 +20325,7 @@ def S2_storerf_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7736768, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_e957fb, AddrModeRel {
let Inst{24-21} = 0b1011;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20267,7 +20345,7 @@ def S2_storerf_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2:brev) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111011;
let accessSize = HalfWordAccess;
@@ -20278,7 +20356,7 @@ def S2_storerf_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++#$Ii:circ($Mu2)) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_10915758 {
+tc_251c87b2, TypeST>, Enc_935d9b {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001011;
@@ -20292,7 +20370,7 @@ def S2_storerf_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++I:circ($Mu2)) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001011;
let addrMode = PostInc;
@@ -20305,7 +20383,7 @@ def S2_storerf_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rx32++#$Ii) = $Rt32.h",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11492529, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_052c7d, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20322,7 +20400,7 @@ def S2_storerf_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101011;
let addrMode = PostInc;
@@ -20334,7 +20412,7 @@ def S2_storerf_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20342,7 +20420,7 @@ def S2_storerfgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(gp+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0011;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
@@ -20359,7 +20437,7 @@ def S2_storerh_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7736768, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_e957fb, AddrModeRel {
let Inst{24-21} = 0b1010;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20380,7 +20458,7 @@ def S2_storerh_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111010;
let accessSize = HalfWordAccess;
@@ -20393,7 +20471,7 @@ def S2_storerh_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_10915758 {
+tc_251c87b2, TypeST>, Enc_935d9b {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001010;
@@ -20408,7 +20486,7 @@ def S2_storerh_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001010;
let addrMode = PostInc;
@@ -20422,7 +20500,7 @@ def S2_storerh_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Rt32),
"memh($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_11492529, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_052c7d, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20439,7 +20517,7 @@ def S2_storerh_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memh($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101010;
let addrMode = PostInc;
@@ -20452,7 +20530,7 @@ def S2_storerh_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20460,7 +20538,7 @@ def S2_storerhgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Rt32),
"memh(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_1186018, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_fda92c, AddrModeRel {
let Inst{24-21} = 0b0010;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
@@ -20478,15 +20556,15 @@ def S2_storerhnew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s31_1Imm:$Ii, IntRegs:$Nt8),
"memh($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_748676, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_0d8870, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -20502,14 +20580,14 @@ def S2_storerhnew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101111101;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerh_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20518,7 +20596,7 @@ def S2_storerhnew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10326434 {
+tc_9c68db63, TypeST>, Enc_91b9fe {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b01;
@@ -20526,8 +20604,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20536,15 +20614,15 @@ def S2_storerhnew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20553,7 +20631,7 @@ def S2_storerhnew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_1Imm:$Ii, IntRegs:$Nt8),
"memh($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_6900405, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_e26546, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b001;
@@ -20561,8 +20639,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storerh_pi";
let isNVStorable = 1;
let isPredicable = 1;
@@ -20573,15 +20651,15 @@ def S2_storerhnew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memh($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20589,7 +20667,7 @@ def S2_storerhnew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20598,14 +20676,14 @@ def S2_storerhnewgp : HInst<
(outs),
(ins u31_1Imm:$Ii, IntRegs:$Nt8),
"memh(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_13618890, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_bc03e5, AddrModeRel {
let Inst{12-11} = 0b01;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storerhabs";
let isPredicable = 1;
@@ -20619,7 +20697,7 @@ def S2_storeri_io : HInst<
(outs),
(ins IntRegs:$Rs32, s30_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_6673186, AddrModeRel {
+tc_53ee6546, TypeST>, Enc_143445, AddrModeRel {
let Inst{24-21} = 0b1100;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
@@ -20640,7 +20718,7 @@ def S2_storeri_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++$Mu2:brev) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_d5c73f, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101111100;
let accessSize = WordAccess;
@@ -20653,7 +20731,7 @@ def S2_storeri_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++#$Ii:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_9915754 {
+tc_251c87b2, TypeST>, Enc_79b8c8 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{31-21} = 0b10101001100;
@@ -20668,7 +20746,7 @@ def S2_storeri_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++I:circ($Mu2)) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000010;
let Inst{31-21} = 0b10101001100;
let addrMode = PostInc;
@@ -20682,7 +20760,7 @@ def S2_storeri_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Rt32),
"memw($Rx32++#$Ii) = $Rt32",
-ST_tc_st_pi_SLOT01, TypeST>, Enc_10492541, AddrModeRel {
+tc_20a8e109, TypeST>, Enc_db40cd, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
@@ -20699,7 +20777,7 @@ def S2_storeri_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Rt32),
"memw($Rx32++$Mu2) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_7255914 {
+tc_20a8e109, TypeST>, Enc_d5c73f {
let Inst{7-0} = 0b00000000;
let Inst{31-21} = 0b10101101100;
let addrMode = PostInc;
@@ -20712,7 +20790,7 @@ def S2_storeri_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_53ee6546, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -20720,7 +20798,7 @@ def S2_storerigp : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Rt32),
"memw(gp+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_15999208, AddrModeRel {
+tc_c14739d5, TypeV2LDST>, Enc_541f26, AddrModeRel {
let Inst{24-21} = 0b0100;
let Inst{31-27} = 0b01001;
let accessSize = WordAccess;
@@ -20738,15 +20816,15 @@ def S2_storerinew_io : HInst<
(outs),
(ins IntRegs:$Rs32, s30_2Imm:$Ii, IntRegs:$Nt8),
"memw($Rs32+#$Ii) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_8409782, AddrModeRel {
+tc_6c576d46, TypeST>, Enc_690862, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b1101;
let Inst{31-27} = 0b10100;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -20762,14 +20840,14 @@ def S2_storerinew_pbr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++$Mu2:brev) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85, AddrModeRel {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101111101;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storeri_pbr";
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20778,7 +20856,7 @@ def S2_storerinew_pci : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++#$Ii:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_11326438 {
+tc_9c68db63, TypeST>, Enc_3f97c8 {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{12-11} = 0b10;
@@ -20786,8 +20864,8 @@ let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 4;
let Constraints = "$Rx32 = $Rx32in";
@@ -20796,15 +20874,15 @@ def S2_storerinew_pcr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++I:circ($Mu2)) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000010;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101001101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [CS];
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
@@ -20813,7 +20891,7 @@ def S2_storerinew_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s4_2Imm:$Ii, IntRegs:$Nt8),
"memw($Rx32++#$Ii) = $Nt8.new",
-ST_tc_st_pi_SLOT0, TypeST>, Enc_7900405, AddrModeRel {
+tc_c8f9a6f6, TypeST>, Enc_223005, AddrModeRel {
let Inst{2-0} = 0b000;
let Inst{7-7} = 0b0;
let Inst{13-11} = 0b010;
@@ -20821,8 +20899,8 @@ let Inst{31-21} = 0b10101011101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let BaseOpcode = "S2_storeri_pi";
let isPredicable = 1;
let opNewValue = 3;
@@ -20832,15 +20910,15 @@ def S2_storerinew_pr : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, IntRegs:$Nt8),
"memw($Rx32++$Mu2) = $Nt8.new",
-ST_tc_st_SLOT0, TypeST>, Enc_10067774 {
+tc_c8f9a6f6, TypeST>, Enc_8dbe85 {
let Inst{7-0} = 0b00000000;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101101101;
let addrMode = PostInc;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let opNewValue = 3;
let Constraints = "$Rx32 = $Rx32in";
}
@@ -20848,7 +20926,7 @@ def S2_storerinew_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Nt8),
"memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_6c576d46, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 1;
@@ -20857,14 +20935,14 @@ def S2_storerinewgp : HInst<
(outs),
(ins u30_2Imm:$Ii, IntRegs:$Nt8),
"memw(gp+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_12297800, AddrModeRel {
+tc_9e86015f, TypeV2LDST>, Enc_78cbf0, AddrModeRel {
let Inst{12-11} = 0b10;
let Inst{24-21} = 0b0101;
let Inst{31-27} = 0b01001;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let Uses = [GP];
let BaseOpcode = "S2_storeriabs";
let isPredicable = 1;
@@ -20878,20 +20956,20 @@ def S2_storew_locked : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"memw_locked($Rs32,$Pd4) = $Rt32",
-ST_tc_ld_SLOT0, TypeST>, Enc_10157519 {
+tc_7d01cbdc, TypeST>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100000101;
let accessSize = WordAccess;
+let isPredicateLate = 1;
let isSoloAX = 1;
let mayStore = 1;
-let isPredicateLate = 1;
}
def S2_svsathb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsathb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -20902,7 +20980,7 @@ def S2_svsathub : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsathub($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001100100;
let hasNewValue = 1;
@@ -20913,7 +20991,7 @@ def S2_tableidxb : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxb($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011100;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20924,7 +21002,7 @@ def S2_tableidxb_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxb($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20935,7 +21013,7 @@ def S2_tableidxd : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxd($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20946,7 +21024,7 @@ def S2_tableidxd_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxd($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20956,7 +21034,7 @@ def S2_tableidxh : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxh($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20967,7 +21045,7 @@ def S2_tableidxh_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxh($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20977,7 +21055,7 @@ def S2_tableidxw : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, s6_0Imm:$II),
"$Rx32 = tableidxw($Rs32,#$Ii,#$II):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8838398 {
+tc_d95f4e98, TypeS_2op>, Enc_cd82bc {
let Inst{31-22} = 0b1000011110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -20988,7 +21066,7 @@ def S2_tableidxw_goodsyntax : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u4_0Imm:$Ii, u5_0Imm:$II),
"$Rx32 = tableidxw($Rs32,#$Ii,#$II)",
-S_2op_tc_1_SLOT23, TypeS_2op> {
+tc_d95f4e98, TypeS_2op> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -20998,7 +21076,7 @@ def S2_togglebit_i : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = togglebit($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456 {
+tc_9c18c9a5, TypeS_2op>, Enc_a05677 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100110;
@@ -21009,7 +21087,7 @@ def S2_togglebit_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = togglebit($Rs32,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_14071773 {
+tc_9c18c9a5, TypeS_3op>, Enc_5ab2be {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -21020,7 +21098,7 @@ def S2_tstbit_i : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = tstbit($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742 {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101000;
@@ -21029,7 +21107,7 @@ def S2_tstbit_r : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = tstbit($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111000;
@@ -21038,7 +21116,7 @@ def S2_valignib : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32, u3_0Imm:$Ii),
"$Rdd32 = valignb($Rtt32,$Rss32,#$Ii)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11971407 {
+tc_d1b5a4b6, TypeS_3op>, Enc_729ff7 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000000000;
}
@@ -21046,7 +21124,7 @@ def S2_valignrb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32, PredRegs:$Pu4),
"$Rdd32 = valignb($Rtt32,$Rss32,$Pu4)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_11552785 {
+tc_d1b5a4b6, TypeS_3op>, Enc_8c6530 {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010000;
@@ -21055,7 +21133,7 @@ def S2_vcnegh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vcnegh($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_47ab9233, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011110;
@@ -21066,7 +21144,7 @@ def S2_vcrotate : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rdd32 = vcrotate($Rss32,$Rt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8940892 {
+tc_63cd9d2d, TypeS_3op>, Enc_927852 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000011110;
@@ -21077,7 +21155,7 @@ def S2_vrcnegh : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32),
"$Rxx32 += vrcnegh($Rss32,$Rt32)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_7912540 {
+tc_8cb685d9, TypeS_3op>, Enc_1aa186 {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b11001011001;
@@ -21088,28 +21166,30 @@ def S2_vrndpackwh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vrndwh($Rss32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_88fa2da6, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S2_vrndpackwhs : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vrndwh($Rss32):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_94e6ffd9, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S2_vsathb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsathb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21120,7 +21200,7 @@ def S2_vsathb_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsathb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21129,7 +21209,7 @@ def S2_vsathub : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsathub($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21140,7 +21220,7 @@ def S2_vsathub_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsathub($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21149,7 +21229,7 @@ def S2_vsatwh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsatwh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21160,7 +21240,7 @@ def S2_vsatwh_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsatwh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21169,7 +21249,7 @@ def S2_vsatwuh : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vsatwuh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10001000000;
let hasNewValue = 1;
@@ -21180,7 +21260,7 @@ def S2_vsatwuh_nopack : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32),
"$Rdd32 = vsatwuh($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_13133231 {
+tc_b86c7e8b, TypeS_2op>, Enc_b9c5fb {
let Inst{13-5} = 0b000000101;
let Inst{31-21} = 0b10000000000;
let Defs = [USR_OVF];
@@ -21189,7 +21269,7 @@ def S2_vsplatrb : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32),
"$Rd32 = vsplatb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4075554 {
+tc_b86c7e8b, TypeS_2op>, Enc_5e2823 {
let Inst{13-5} = 0b000000111;
let Inst{31-21} = 0b10001100010;
let hasNewValue = 1;
@@ -21201,7 +21281,7 @@ def S2_vsplatrh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsplath($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100010;
let isReMaterializable = 1;
@@ -21211,7 +21291,7 @@ def S2_vspliceib : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, u3_0Imm:$Ii),
"$Rdd32 = vspliceb($Rss32,$Rtt32,#$Ii)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_16730127 {
+tc_d1b5a4b6, TypeS_3op>, Enc_d50cd3 {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000000100;
}
@@ -21219,7 +21299,7 @@ def S2_vsplicerb : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32, PredRegs:$Pu4),
"$Rdd32 = vspliceb($Rss32,$Rtt32,$Pu4)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_5178985 {
+tc_d1b5a4b6, TypeS_3op>, Enc_dbd70c {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000010100;
@@ -21228,7 +21308,7 @@ def S2_vsxtbh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsxtbh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21238,7 +21318,7 @@ def S2_vsxthw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsxthw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21248,7 +21328,7 @@ def S2_vtrunehb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vtrunehb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
@@ -21258,7 +21338,7 @@ def S2_vtrunewh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunewh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -21267,7 +21347,7 @@ def S2_vtrunohb : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = vtrunohb($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_b86c7e8b, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000100;
let hasNewValue = 1;
@@ -21277,7 +21357,7 @@ def S2_vtrunowh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunowh($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_9c18c9a5, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -21286,7 +21366,7 @@ def S2_vzxtbh : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vzxtbh($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21296,7 +21376,7 @@ def S2_vzxthw : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vzxthw($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179 {
+tc_b86c7e8b, TypeS_2op>, Enc_3a3d62 {
let Inst{13-5} = 0b000000110;
let Inst{31-21} = 0b10000100000;
let isReMaterializable = 1;
@@ -21306,7 +21386,7 @@ def S4_addaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Ru32, s32_0Imm:$Ii),
"$Rd32 = add($Rs32,add($Ru32,#$Ii))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6495334 {
+tc_090485bb, TypeALU64>, Enc_8b8d61 {
let Inst{31-23} = 0b110110110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21321,7 +21401,7 @@ def S4_addi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = add(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b100;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21339,7 +21419,7 @@ def S4_addi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = add(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b100;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21357,7 +21437,7 @@ def S4_andi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = and(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b000;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21375,7 +21455,7 @@ def S4_andi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = and(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b000;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21393,7 +21473,7 @@ def S4_clbaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s6_0Imm:$Ii),
"$Rd32 = add(clb($Rs32),#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_5523416 {
+tc_87601822, TypeS_2op>, Enc_9fae8a {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b10001100001;
let hasNewValue = 1;
@@ -21404,7 +21484,7 @@ def S4_clbpaddi : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, s6_0Imm:$Ii),
"$Rd32 = add(clb($Rss32),#$Ii)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_10188026 {
+tc_87601822, TypeS_2op>, Enc_a1640c {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
@@ -21415,17 +21495,18 @@ def S4_clbpnorm : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = normamt($Rss32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_3742184 {
+tc_ab1b5e74, TypeS_2op>, Enc_90cd8b {
let Inst{13-5} = 0b000000000;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
}
def S4_extract : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii, u5_0Imm:$II),
"$Rd32 = extract($Rs32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_11930928 {
+tc_c0cd91a8, TypeS_2op>, Enc_b388cf {
let Inst{13-13} = 0b0;
let Inst{31-23} = 0b100011011;
let hasNewValue = 1;
@@ -21436,7 +21517,7 @@ def S4_extract_rp : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"$Rd32 = extract($Rs32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_15472748 {
+tc_87601822, TypeS_3op>, Enc_e07374 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11001001000;
@@ -21448,7 +21529,7 @@ def S4_extractp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii, u6_0Imm:$II),
"$Rdd32 = extract($Rss32,#$Ii,#$II)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_9894557 {
+tc_c0cd91a8, TypeS_2op>, Enc_b84c4c {
let Inst{31-24} = 0b10001010;
let prefersSlot3 = 1;
}
@@ -21456,7 +21537,7 @@ def S4_extractp_rp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = extract($Rss32,$Rtt32)",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_87601822, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -21466,7 +21547,7 @@ def S4_lsli : HInst<
(outs IntRegs:$Rd32),
(ins s6_0Imm:$Ii, IntRegs:$Rt32),
"$Rd32 = lsl(#$Ii,$Rt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_518319 {
+tc_9c18c9a5, TypeS_3op>, Enc_fef969 {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000110100;
@@ -21477,7 +21558,7 @@ def S4_ntstbit_i : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Pd4 = !tstbit($Rs32,#$Ii)",
-S_2op_tc_2early_SLOT23, TypeS_2op>, Enc_2103742 {
+tc_5fa2857c, TypeS_2op>, Enc_83ee64 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10000101001;
@@ -21486,7 +21567,7 @@ def S4_ntstbit_r : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Pd4 = !tstbit($Rs32,$Rt32)",
-S_3op_tc_2early_SLOT23, TypeS_3op>, Enc_10157519 {
+tc_c58f771a, TypeS_3op>, Enc_c2b48e {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000111001;
@@ -21495,7 +21576,7 @@ def S4_or_andi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 |= and($Rs32,#$Ii)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6356866 {
+tc_3c10f809, TypeALU64>, Enc_b0e9d8 {
let Inst{31-22} = 0b1101101000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21512,7 +21593,7 @@ def S4_or_andix : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Ru32, IntRegs:$Rx32in, s32_0Imm:$Ii),
"$Rx32 = or($Ru32,and($Rx32in,#$Ii))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_7504828 {
+tc_3c10f809, TypeALU64>, Enc_b4e6cf {
let Inst{31-22} = 0b1101101001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21528,7 +21609,7 @@ def S4_or_ori : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, s32_0Imm:$Ii),
"$Rx32 |= or($Rs32,#$Ii)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6356866 {
+tc_3c10f809, TypeALU64>, Enc_b0e9d8 {
let Inst{31-22} = 0b1101101010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -21545,7 +21626,7 @@ def S4_ori_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = or(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b010;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -21563,7 +21644,7 @@ def S4_ori_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = or(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_3c10f809, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b010;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -21581,7 +21662,7 @@ def S4_parity : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = parity($Rs32,$Rt32)",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_87601822, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101111;
@@ -21593,7 +21674,7 @@ def S4_pstorerbf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -21618,7 +21699,7 @@ def S4_pstorerbf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -21634,7 +21715,7 @@ def S4_pstorerbfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -21643,8 +21724,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
@@ -21660,7 +21741,7 @@ def S4_pstorerbfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110000;
let isPredicated = 1;
@@ -21683,7 +21764,7 @@ def S4_pstorerbfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111000;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -21700,7 +21781,7 @@ def S4_pstorerbfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -21708,7 +21789,7 @@ def S4_pstorerbnewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b000;
@@ -21718,9 +21799,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21735,7 +21816,7 @@ def S4_pstorerbnewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -21743,8 +21824,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21754,7 +21835,7 @@ def S4_pstorerbnewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -21764,10 +21845,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21782,7 +21863,7 @@ def S4_pstorerbnewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000110101;
@@ -21792,8 +21873,8 @@ let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -21808,7 +21889,7 @@ def S4_pstorerbnewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -21817,8 +21898,8 @@ let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21828,7 +21909,7 @@ def S4_pstorerbnewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -21837,7 +21918,7 @@ def S4_pstorerbnewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b000;
@@ -21846,9 +21927,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21863,15 +21944,15 @@ def S4_pstorerbnewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21881,7 +21962,7 @@ def S4_pstorerbnewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b100;
@@ -21890,10 +21971,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
let DecoderNamespace = "MustExtend";
@@ -21908,7 +21989,7 @@ def S4_pstorerbnewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_1737833, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_585242, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b01000010101;
@@ -21917,8 +21998,8 @@ let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "imm";
let BaseOpcode = "S2_storerb_io";
@@ -21933,7 +22014,7 @@ def S4_pstorerbnewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b00;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -21941,8 +22022,8 @@ let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -21952,7 +22033,7 @@ def S4_pstorerbnewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memb($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -21961,7 +22042,7 @@ def S4_pstorerbt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -21985,7 +22066,7 @@ def S4_pstorerbt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100000;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22000,7 +22081,7 @@ def S4_pstorerbtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22008,8 +22089,8 @@ let Inst{31-18} = 0b10101111000000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = ByteAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S2_storerbabs";
@@ -22025,7 +22106,7 @@ def S4_pstorerbtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_14044877, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_da8d43, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010000;
let isPredicated = 1;
@@ -22047,7 +22128,7 @@ def S4_pstorerbtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110000;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22063,7 +22144,7 @@ def S4_pstorerbtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memb($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22071,7 +22152,7 @@ def S4_pstorerdf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22095,7 +22176,7 @@ def S4_pstorerdf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110101110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22110,7 +22191,7 @@ def S4_pstorerdfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_336e698c, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22119,8 +22200,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerd";
let BaseOpcode = "S2_storerdabs";
@@ -22135,7 +22216,7 @@ def S4_pstorerdfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110110;
let isPredicated = 1;
@@ -22157,7 +22238,7 @@ def S4_pstorerdfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110111110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22173,7 +22254,7 @@ def S4_pstorerdfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if (!$Pv4.new) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22181,7 +22262,7 @@ def S4_pstorerdt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22204,7 +22285,7 @@ def S4_pstorerdt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110100110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22218,7 +22299,7 @@ def S4_pstorerdtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd(#$Ii) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_13715847, AddrModeRel {
+tc_336e698c, TypeST>, Enc_50b5ac, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22226,8 +22307,8 @@ let Inst{31-18} = 0b10101111110000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = DoubleWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerd";
let BaseOpcode = "S2_storerdabs";
@@ -22242,7 +22323,7 @@ def S4_pstorerdtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u29_3Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32+#$Ii) = $Rtt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_11049656, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_57a33e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010110;
let isPredicated = 1;
@@ -22263,7 +22344,7 @@ def S4_pstorerdtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9920336, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_1a9974, AddrModeRel {
let Inst{31-21} = 0b00110110110;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22278,7 +22359,7 @@ def S4_pstorerdtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, DoubleRegs:$Rtt32),
"if ($Pv4.new) memd($Rs32) = $Rtt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22286,7 +22367,7 @@ def S4_pstorerff_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22310,7 +22391,7 @@ def S4_pstorerff_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22325,7 +22406,7 @@ def S4_pstorerffnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22334,8 +22415,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerf";
let BaseOpcode = "S2_storerfabs";
@@ -22350,7 +22431,7 @@ def S4_pstorerffnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110011;
let isPredicated = 1;
@@ -22372,7 +22453,7 @@ def S4_pstorerffnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111011;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22388,7 +22469,7 @@ def S4_pstorerffnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22396,7 +22477,7 @@ def S4_pstorerft_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22419,7 +22500,7 @@ def S4_pstorerft_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100011;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22433,7 +22514,7 @@ def S4_pstorerftnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh(#$Ii) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22441,8 +22522,8 @@ let Inst{31-18} = 0b10101111011000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerf";
let BaseOpcode = "S2_storerfabs";
@@ -22457,7 +22538,7 @@ def S4_pstorerftnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Rt32.h",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010011;
let isPredicated = 1;
@@ -22478,7 +22559,7 @@ def S4_pstorerftnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110011;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22493,7 +22574,7 @@ def S4_pstorerftnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32) = $Rt32.h",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22501,7 +22582,7 @@ def S4_pstorerhf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22526,7 +22607,7 @@ def S4_pstorerhf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22542,7 +22623,7 @@ def S4_pstorerhfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22551,8 +22632,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
@@ -22568,7 +22649,7 @@ def S4_pstorerhfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110010;
let isPredicated = 1;
@@ -22591,7 +22672,7 @@ def S4_pstorerhfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111010;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -22608,7 +22689,7 @@ def S4_pstorerhfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22616,7 +22697,7 @@ def S4_pstorerhnewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b001;
@@ -22626,9 +22707,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22643,7 +22724,7 @@ def S4_pstorerhnewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -22651,8 +22732,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22662,7 +22743,7 @@ def S4_pstorerhnewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -22672,10 +22753,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22690,7 +22771,7 @@ def S4_pstorerhnewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000110101;
@@ -22700,8 +22781,8 @@ let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -22716,7 +22797,7 @@ def S4_pstorerhnewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -22725,8 +22806,8 @@ let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22736,7 +22817,7 @@ def S4_pstorerhnewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -22745,7 +22826,7 @@ def S4_pstorerhnewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b001;
@@ -22754,9 +22835,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22771,15 +22852,15 @@ def S4_pstorerhnewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22789,7 +22870,7 @@ def S4_pstorerhnewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b101;
@@ -22798,10 +22879,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
let DecoderNamespace = "MustExtend";
@@ -22816,7 +22897,7 @@ def S4_pstorerhnewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_6154421, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_f44229, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b01000010101;
@@ -22825,8 +22906,8 @@ let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "imm";
let BaseOpcode = "S2_storerh_io";
@@ -22841,7 +22922,7 @@ def S4_pstorerhnewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b01;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -22849,8 +22930,8 @@ let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -22860,7 +22941,7 @@ def S4_pstorerhnewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memh($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -22869,7 +22950,7 @@ def S4_pstorerht_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -22893,7 +22974,7 @@ def S4_pstorerht_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100010;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22908,7 +22989,7 @@ def S4_pstorerhtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -22916,8 +22997,8 @@ let Inst{31-18} = 0b10101111010000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = HalfWordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerhabs";
@@ -22933,7 +23014,7 @@ def S4_pstorerhtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u31_1Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_10979813, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_e8c45e, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010010;
let isPredicated = 1;
@@ -22955,7 +23036,7 @@ def S4_pstorerhtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110010;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -22971,7 +23052,7 @@ def S4_pstorerhtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memh($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -22979,7 +23060,7 @@ def S4_pstorerif_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -23004,7 +23085,7 @@ def S4_pstorerif_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110101100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23020,7 +23101,7 @@ def S4_pstorerifnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -23029,8 +23110,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
@@ -23046,7 +23127,7 @@ def S4_pstorerifnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000110100;
let isPredicated = 1;
@@ -23069,7 +23150,7 @@ def S4_pstorerifnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110111100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23086,7 +23167,7 @@ def S4_pstorerifnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if (!$Pv4.new) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23094,7 +23175,7 @@ def S4_pstorerinewf_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b010;
@@ -23104,9 +23185,9 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23121,7 +23202,7 @@ def S4_pstorerinewf_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110101101;
let isPredicated = 1;
@@ -23129,8 +23210,8 @@ let isPredicatedFalse = 1;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23140,7 +23221,7 @@ def S4_pstorerinewfnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b1;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -23150,10 +23231,10 @@ let isPredicatedFalse = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23168,7 +23249,7 @@ def S4_pstorerinewfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000110101;
@@ -23178,8 +23259,8 @@ let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -23194,7 +23275,7 @@ def S4_pstorerinewfnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110111101;
let isPredicated = 1;
@@ -23203,8 +23284,8 @@ let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23214,7 +23295,7 @@ def S4_pstorerinewfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if (!$Pv4.new) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -23223,7 +23304,7 @@ def S4_pstorerinewt_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_2c8fe5ae, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b010;
@@ -23232,9 +23313,9 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23249,15 +23330,15 @@ def S4_pstorerinewt_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_77781686, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110100101;
let isPredicated = 1;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23267,7 +23348,7 @@ def S4_pstorerinewtnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw(#$Ii) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_1774350, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_44215c, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-11} = 0b110;
@@ -23276,10 +23357,10 @@ let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
let isNVStore = 1;
-let isExtended = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let isExtended = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
let DecoderNamespace = "MustExtend";
@@ -23294,7 +23375,7 @@ def S4_pstorerinewtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32+#$Ii) = $Nt8.new",
-V2LDST_tc_st_SLOT0, TypeV2LDST>, Enc_11224149, AddrModeRel {
+tc_c8f9a6f6, TypeV2LDST>, Enc_8dbdfe, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b01000010101;
@@ -23303,8 +23384,8 @@ let addrMode = BaseImmOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "imm";
let BaseOpcode = "S2_storeri_io";
@@ -23319,7 +23400,7 @@ def S4_pstorerinewtnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_11000933, AddrModeRel {
+tc_8def9c57, TypeST>, Enc_47ee5e, AddrModeRel {
let Inst{4-3} = 0b10;
let Inst{31-21} = 0b00110110101;
let isPredicated = 1;
@@ -23327,8 +23408,8 @@ let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
let isPredicatedNew = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -23338,7 +23419,7 @@ def S4_pstorerinewtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Nt8),
"if ($Pv4.new) memw($Rs32) = $Nt8.new",
-PSEUDO, TypeMAPPING> {
+tc_c8f9a6f6, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
let opNewValue = 2;
@@ -23347,7 +23428,7 @@ def S4_pstorerit_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_c85212ca, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b0;
@@ -23371,7 +23452,7 @@ def S4_pstorerit_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7bc567a7, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110100100;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -23386,7 +23467,7 @@ def S4_pstoreritnew_abs : HInst<
(outs),
(ins PredRegs:$Pv4, u32_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw(#$Ii) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_16657398, AddrModeRel {
+tc_336e698c, TypeST>, Enc_1cf4ca, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
@@ -23394,8 +23475,8 @@ let Inst{31-18} = 0b10101111100000;
let isPredicated = 1;
let addrMode = Absolute;
let accessSize = WordAccess;
-let isExtended = 1;
let isPredicatedNew = 1;
+let isExtended = 1;
let mayStore = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeriabs";
@@ -23411,7 +23492,7 @@ def S4_pstoreritnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u30_2Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32+#$Ii) = $Rt32",
-V2LDST_tc_st_SLOT01, TypeV2LDST>, Enc_8225953, AddrModeRel {
+tc_20a8e109, TypeV2LDST>, Enc_397f23, AddrModeRel {
let Inst{2-2} = 0b0;
let Inst{31-21} = 0b01000010100;
let isPredicated = 1;
@@ -23433,7 +23514,7 @@ def S4_pstoreritnew_rr : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11940513, AddrModeRel {
+tc_7639d4b0, TypeST>, Enc_6339d5, AddrModeRel {
let Inst{31-21} = 0b00110110100;
let isPredicated = 1;
let addrMode = BaseRegOffset;
@@ -23449,7 +23530,7 @@ def S4_pstoreritnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, IntRegs:$Rt32),
"if ($Pv4.new) memw($Rs32) = $Rt32",
-PSEUDO, TypeMAPPING> {
+tc_20a8e109, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23457,20 +23538,20 @@ def S4_stored_locked : HInst<
(outs PredRegs:$Pd4),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"memd_locked($Rs32,$Pd4) = $Rtt32",
-ST_tc_ld_SLOT0, TypeST>, Enc_2921694 {
+tc_7d01cbdc, TypeST>, Enc_d7dc10 {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100000111;
let accessSize = DoubleWordAccess;
+let isPredicateLate = 1;
let isSoloAX = 1;
let mayStore = 1;
-let isPredicateLate = 1;
}
def S4_storeirb_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_11282123, PredNewRel {
+tc_fcee8723, TypeST>, Enc_8203bb, PredNewRel {
let Inst{31-21} = 0b00111100000;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -23489,7 +23570,7 @@ def S4_storeirb_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23497,7 +23578,7 @@ def S4_storeirbf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111000100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23517,7 +23598,7 @@ def S4_storeirbf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23525,7 +23606,7 @@ def S4_storeirbfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111001100;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23546,7 +23627,7 @@ def S4_storeirbfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23554,7 +23635,7 @@ def S4_storeirbt_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111000000;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23573,7 +23654,7 @@ def S4_storeirbt_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23581,7 +23662,7 @@ def S4_storeirbtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_0Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memb($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_5967898, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_d7a65e, PredNewRel {
let Inst{31-21} = 0b00111001000;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23601,7 +23682,7 @@ def S4_storeirbtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memb($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23609,7 +23690,7 @@ def S4_storeirh_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_10282127, PredNewRel {
+tc_fcee8723, TypeST>, Enc_a803e0, PredNewRel {
let Inst{31-21} = 0b00111100001;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
@@ -23628,7 +23709,7 @@ def S4_storeirh_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23636,7 +23717,7 @@ def S4_storeirhf_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23656,7 +23737,7 @@ def S4_storeirhf_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23664,7 +23745,7 @@ def S4_storeirhfnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111001101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23685,7 +23766,7 @@ def S4_storeirhfnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23693,7 +23774,7 @@ def S4_storeirht_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111000001;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23712,7 +23793,7 @@ def S4_storeirht_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23720,7 +23801,7 @@ def S4_storeirhtnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_1Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memh($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_4967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_f20719, PredNewRel {
let Inst{31-21} = 0b00111001001;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23740,7 +23821,7 @@ def S4_storeirhtnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memh($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23748,7 +23829,7 @@ def S4_storeiri_io : HInst<
(outs),
(ins IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9282127, PredNewRel {
+tc_fcee8723, TypeST>, Enc_f37377, PredNewRel {
let Inst{31-21} = 0b00111100010;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -23767,7 +23848,7 @@ def S4_storeiri_zomap : HInst<
(outs),
(ins IntRegs:$Rs32, s8_0Imm:$II),
"memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_fcee8723, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23775,7 +23856,7 @@ def S4_storeirif_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111000110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23795,7 +23876,7 @@ def S4_storeirif_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23803,7 +23884,7 @@ def S4_storeirifnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if (!$Pv4.new) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111001110;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -23824,7 +23905,7 @@ def S4_storeirifnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if (!$Pv4.new) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23832,7 +23913,7 @@ def S4_storeirit_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if ($Pv4) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_1e69aa99, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111000010;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23851,7 +23932,7 @@ def S4_storeirit_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_1e69aa99, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23859,7 +23940,7 @@ def S4_storeiritnew_io : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, u6_2Imm:$Ii, s32_0Imm:$II),
"if ($Pv4.new) memw($Rs32+#$Ii) = #$II",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_3967902, PredNewRel {
+tc_8f0a6bad, TypeST>, Enc_5ccba9, PredNewRel {
let Inst{31-21} = 0b00111001010;
let isPredicated = 1;
let addrMode = BaseImmOffset;
@@ -23879,7 +23960,7 @@ def S4_storeiritnew_zomap : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rs32, s6_0Imm:$II),
"if ($Pv4.new) memw($Rs32) = #$II",
-PSEUDO, TypeMAPPING> {
+tc_8f0a6bad, TypeMAPPING> {
let isPseudo = 1;
let isCodeGenOnly = 1;
}
@@ -23887,7 +23968,7 @@ def S4_storerb_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memb($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011000;
@@ -23910,7 +23991,7 @@ def S4_storerb_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memb($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011000;
let addrMode = BaseRegOffset;
@@ -23926,7 +24007,7 @@ def S4_storerb_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memb($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101000;
let addrMode = BaseLongOffset;
@@ -23948,7 +24029,7 @@ def S4_storerbnew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memb($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10101011101;
@@ -23957,9 +24038,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storerb_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -23973,14 +24054,14 @@ def S4_storerbnew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memb($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0000;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerb";
let InputType = "reg";
let BaseOpcode = "S4_storerb_rr";
@@ -23991,16 +24072,16 @@ def S4_storerbnew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memb($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = ByteAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerb";
let BaseOpcode = "S4_storerb_ur";
let DecoderNamespace = "MustExtend";
@@ -24015,7 +24096,7 @@ def S4_storerd_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, DoubleRegs:$Rtt32),
"memd($Re32=#$II) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_8131399 {
+tc_336e698c, TypeST>, Enc_c7a204 {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011110;
@@ -24037,7 +24118,7 @@ def S4_storerd_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, DoubleRegs:$Rtt32),
"memd($Rs32+$Ru32<<#$Ii) = $Rtt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_9772987, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_55355c, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011110;
let addrMode = BaseRegOffset;
@@ -24052,7 +24133,7 @@ def S4_storerd_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, DoubleRegs:$Rtt32),
"memd($Ru32<<#$Ii+#$II) = $Rtt32",
-ST_tc_st_SLOT01, TypeST>, Enc_12848507, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_f79415, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101110;
let addrMode = BaseLongOffset;
@@ -24073,7 +24154,7 @@ def S4_storerf_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memh($Re32=#$II) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246 {
+tc_336e698c, TypeST>, Enc_8bcba4 {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011011;
@@ -24095,7 +24176,7 @@ def S4_storerf_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+$Ru32<<#$Ii) = $Rt32.h",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011011;
let addrMode = BaseRegOffset;
@@ -24110,7 +24191,7 @@ def S4_storerf_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memh($Ru32<<#$Ii+#$II) = $Rt32.h",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101011;
let addrMode = BaseLongOffset;
@@ -24131,7 +24212,7 @@ def S4_storerh_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memh($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011010;
@@ -24154,7 +24235,7 @@ def S4_storerh_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memh($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011010;
let addrMode = BaseRegOffset;
@@ -24170,7 +24251,7 @@ def S4_storerh_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memh($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101010;
let addrMode = BaseLongOffset;
@@ -24192,7 +24273,7 @@ def S4_storerhnew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memh($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b001;
let Inst{31-21} = 0b10101011101;
@@ -24201,9 +24282,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storerh_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -24217,14 +24298,14 @@ def S4_storerhnew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memh($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0001;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storerh";
let InputType = "reg";
let BaseOpcode = "S2_storerh_rr";
@@ -24235,16 +24316,16 @@ def S4_storerhnew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memh($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b01;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = HalfWordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storerh";
let BaseOpcode = "S2_storerh_ur";
let DecoderNamespace = "MustExtend";
@@ -24259,7 +24340,7 @@ def S4_storeri_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Rt32),
"memw($Re32=#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_11477246, AddrModeRel {
+tc_336e698c, TypeST>, Enc_8bcba4, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10101011100;
@@ -24282,7 +24363,7 @@ def S4_storeri_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Rt32),
"memw($Rs32+$Ru32<<#$Ii) = $Rt32",
-V4LDST_tc_st_SLOT01, TypeST>, Enc_14046916, AddrModeRel, ImmRegShl {
+tc_45631a8d, TypeST>, Enc_eca7c8, AddrModeRel, ImmRegShl {
let Inst{6-5} = 0b00;
let Inst{31-21} = 0b00111011100;
let addrMode = BaseRegOffset;
@@ -24298,7 +24379,7 @@ def S4_storeri_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Rt32),
"memw($Ru32<<#$Ii+#$II) = $Rt32",
-ST_tc_st_SLOT01, TypeST>, Enc_14689096, AddrModeRel, ImmRegShl {
+tc_a4567c39, TypeST>, Enc_9ea4cf, AddrModeRel, ImmRegShl {
let Inst{7-7} = 0b1;
let Inst{31-21} = 0b10101101100;
let addrMode = BaseLongOffset;
@@ -24320,7 +24401,7 @@ def S4_storerinew_ap : HInst<
(outs IntRegs:$Re32),
(ins u32_0Imm:$II, IntRegs:$Nt8),
"memw($Re32=#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_14193700, AddrModeRel {
+tc_7986ba30, TypeST>, Enc_724154, AddrModeRel {
let Inst{7-6} = 0b10;
let Inst{13-11} = 0b010;
let Inst{31-21} = 0b10101011101;
@@ -24329,9 +24410,9 @@ let opNewValue = 0;
let addrMode = AbsoluteSet;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let BaseOpcode = "S2_storeri_ap";
let DecoderNamespace = "MustExtend";
let isExtendable = 1;
@@ -24345,14 +24426,14 @@ def S4_storerinew_rr : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Ru32, u2_0Imm:$Ii, IntRegs:$Nt8),
"memw($Rs32+$Ru32<<#$Ii) = $Nt8.new",
-V4LDST_tc_st_SLOT0, TypeST>, Enc_5486172, AddrModeRel {
+tc_be995eaf, TypeST>, Enc_c6220b, AddrModeRel {
let Inst{6-3} = 0b0010;
let Inst{31-21} = 0b00111011101;
let addrMode = BaseRegOffset;
let accessSize = WordAccess;
let isNVStore = 1;
-let mayStore = 1;
let isNewValue = 1;
+let mayStore = 1;
let CextOpcode = "S2_storeri";
let InputType = "reg";
let BaseOpcode = "S2_storeri_rr";
@@ -24363,16 +24444,16 @@ def S4_storerinew_ur : HInst<
(outs),
(ins IntRegs:$Ru32, u2_0Imm:$Ii, u32_0Imm:$II, IntRegs:$Nt8),
"memw($Ru32<<#$Ii+#$II) = $Nt8.new",
-NCJ_tc_3or4stall_SLOT0, TypeST>, Enc_10076500, AddrModeRel {
+tc_210b2456, TypeST>, Enc_7eb485, AddrModeRel {
let Inst{7-7} = 0b1;
let Inst{12-11} = 0b10;
let Inst{31-21} = 0b10101101101;
let addrMode = BaseLongOffset;
let accessSize = WordAccess;
let isNVStore = 1;
+let isNewValue = 1;
let isExtended = 1;
let mayStore = 1;
-let isNewValue = 1;
let CextOpcode = "S2_storeri";
let BaseOpcode = "S2_storeri_ur";
let DecoderNamespace = "MustExtend";
@@ -24387,7 +24468,7 @@ def S4_subaddi : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, s32_0Imm:$Ii, IntRegs:$Ru32),
"$Rd32 = add($Rs32,sub(#$Ii,$Ru32))",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_6495334 {
+tc_090485bb, TypeALU64>, Enc_8b8d61 {
let Inst{31-23} = 0b110110111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24402,7 +24483,7 @@ def S4_subi_asl_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = sub(#$Ii,asl($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b110;
let Inst{4-4} = 0b0;
let Inst{31-24} = 0b11011110;
@@ -24420,7 +24501,7 @@ def S4_subi_lsr_ri : HInst<
(outs IntRegs:$Rx32),
(ins u32_0Imm:$Ii, IntRegs:$Rx32in, u5_0Imm:$II),
"$Rx32 = sub(#$Ii,lsr($Rx32in,#$II))",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_117962 {
+tc_c0cd91a8, TypeALU64>, Enc_c31910 {
let Inst{2-0} = 0b110;
let Inst{4-4} = 0b1;
let Inst{31-24} = 0b11011110;
@@ -24438,7 +24519,7 @@ def S4_vrcrotate : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rdd32 = vrcrotate($Rss32,$Rt32,#$Ii)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_114098 {
+tc_6264c5e0, TypeS_3op>, Enc_645d54 {
let Inst{7-6} = 0b11;
let Inst{31-21} = 0b11000011110;
let prefersSlot3 = 1;
@@ -24447,7 +24528,7 @@ def S4_vrcrotate_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, IntRegs:$Rt32, u2_0Imm:$Ii),
"$Rxx32 += vrcrotate($Rss32,$Rt32,#$Ii)",
-S_3op_tc_3x_SLOT23, TypeS_3op>, Enc_13114546 {
+tc_bc5561d8, TypeS_3op>, Enc_b72622 {
let Inst{7-6} = 0b00;
let Inst{31-21} = 0b11001011101;
let prefersSlot3 = 1;
@@ -24457,17 +24538,18 @@ def S4_vxaddsubh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubh($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxaddsubhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubh($Rss32,$Rtt32):rnd:>>1:sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_63cd9d2d, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -24478,27 +24560,29 @@ def S4_vxaddsubw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxaddsubw($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxsubaddh : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddh($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S4_vxsubaddhr : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddh($Rss32,$Rtt32):rnd:>>1:sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_63cd9d2d, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001110;
@@ -24509,17 +24593,18 @@ def S4_vxsubaddw : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vxsubaddw($Rss32,$Rtt32):sat",
-S_3op_tc_2_SLOT23, TypeS_3op>, Enc_8333157 {
+tc_47ab9233, TypeS_3op>, Enc_a56825 {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001010;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def S5_asrhub_rnd_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):raw",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8038806, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_11a146, Requires<[HasV5T]> {
let Inst{7-5} = 0b100;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10001000011;
@@ -24532,7 +24617,7 @@ def S5_asrhub_rnd_sat_goodsyntax : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):rnd:sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -24541,7 +24626,7 @@ def S5_asrhub_sat : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rd32 = vasrhub($Rss32,#$Ii):sat",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_8038806, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_11a146, Requires<[HasV5T]> {
let Inst{7-5} = 0b101;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10001000011;
@@ -24554,7 +24639,7 @@ def S5_popcountp : HInst<
(outs IntRegs:$Rd32),
(ins DoubleRegs:$Rss32),
"$Rd32 = popcount($Rss32)",
-S_2op_tc_2_SLOT23, TypeS_2op>, Enc_3742184, Requires<[HasV5T]> {
+tc_ca280e8b, TypeS_2op>, Enc_90cd8b, Requires<[HasV5T]> {
let Inst{13-5} = 0b000000011;
let Inst{31-21} = 0b10001000011;
let hasNewValue = 1;
@@ -24565,7 +24650,7 @@ def S5_vasrhrnd : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii):raw",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2082775, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Enc_12b6e9, Requires<[HasV5T]> {
let Inst{7-5} = 0b000;
let Inst{13-12} = 0b00;
let Inst{31-21} = 0b10000000001;
@@ -24575,14 +24660,14 @@ def S5_vasrhrnd_goodsyntax : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u4_0Imm:$Ii),
"$Rdd32 = vasrh($Rss32,#$Ii):rnd",
-S_2op_tc_1_SLOT23, TypeS_2op>, Requires<[HasV5T]> {
+tc_63cd9d2d, TypeS_2op>, Requires<[HasV5T]> {
let isPseudo = 1;
}
def S6_rol_i_p : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rdd32 = rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4231995, Requires<[HasV60T]> {
+tc_9f518242, TypeS_2op>, Enc_5eac98, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000000000;
}
@@ -24590,7 +24675,7 @@ def S6_rol_i_p_acc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 += rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -24600,7 +24685,7 @@ def S6_rol_i_p_and : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 &= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -24610,7 +24695,7 @@ def S6_rol_i_p_nac : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 -= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010000;
let prefersSlot3 = 1;
@@ -24620,7 +24705,7 @@ def S6_rol_i_p_or : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 |= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b10000010010;
let prefersSlot3 = 1;
@@ -24630,7 +24715,7 @@ def S6_rol_i_p_xacc : HInst<
(outs DoubleRegs:$Rxx32),
(ins DoubleRegs:$Rxx32in, DoubleRegs:$Rss32, u6_0Imm:$Ii),
"$Rxx32 ^= rol($Rss32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_8497723, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_70fb07, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b10000010100;
let prefersSlot3 = 1;
@@ -24640,7 +24725,7 @@ def S6_rol_i_r : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rd32 = rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2771456, Requires<[HasV60T]> {
+tc_9f518242, TypeS_2op>, Enc_a05677, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001100000;
@@ -24651,7 +24736,7 @@ def S6_rol_i_r_acc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 += rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -24664,7 +24749,7 @@ def S6_rol_i_r_and : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 &= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -24677,7 +24762,7 @@ def S6_rol_i_r_nac : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 -= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110000;
@@ -24690,7 +24775,7 @@ def S6_rol_i_r_or : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 |= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110010;
@@ -24703,7 +24788,7 @@ def S6_rol_i_r_xacc : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, IntRegs:$Rs32, u5_0Imm:$Ii),
"$Rx32 ^= rol($Rs32,#$Ii)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_2410156, Requires<[HasV60T]> {
+tc_e17ce9ad, TypeS_2op>, Enc_28a2dc, Requires<[HasV60T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10001110100;
@@ -24716,7 +24801,7 @@ def S6_vsplatrbp : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32),
"$Rdd32 = vsplatb($Rs32)",
-S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV62T]> {
+tc_78b3c689, TypeS_2op>, Enc_3a3d62, Requires<[HasV62T]> {
let Inst{13-5} = 0b000000100;
let Inst{31-21} = 0b10000100010;
}
@@ -24724,7 +24809,7 @@ def S6_vtrunehb_ppp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunehb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> {
+tc_9f518242, TypeS_3op>, Enc_a56825, Requires<[HasV62T]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -24733,7 +24818,7 @@ def S6_vtrunohb_ppp : HInst<
(outs DoubleRegs:$Rdd32),
(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32),
"$Rdd32 = vtrunohb($Rss32,$Rtt32)",
-S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> {
+tc_9f518242, TypeS_3op>, Enc_a56825, Requires<[HasV62T]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11000001100;
@@ -24742,7 +24827,7 @@ def SA1_addi : HInst<
(outs GeneralSubRegs:$Rx16),
(ins IntRegs:$Rx16in, s32_0Imm:$Ii),
"$Rx16 = add($Rx16in,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_3974695 {
+tc_821c4233, TypeSUBINSN>, Enc_93af4c {
let Inst{12-11} = 0b00;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24759,7 +24844,7 @@ def SA1_addrx : HInst<
(outs GeneralSubRegs:$Rx16),
(ins IntRegs:$Rx16in, GeneralSubRegs:$Rs16),
"$Rx16 = add($Rx16in,$Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_6135183 {
+tc_821c4233, TypeSUBINSN>, Enc_0527db {
let Inst{12-8} = 0b11000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24771,7 +24856,7 @@ def SA1_addsp : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u6_2Imm:$Ii),
"$Rd16 = add(r29,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_176263 {
+tc_d2609065, TypeSUBINSN>, Enc_2df31d {
let Inst{12-10} = 0b011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24783,7 +24868,7 @@ def SA1_and1 : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = and($Rs16,#1)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24794,7 +24879,7 @@ def SA1_clrf : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (!p0) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_7c2dcd4d, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -24808,7 +24893,7 @@ def SA1_clrfnew : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (!p0.new) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_f26aa619, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -24823,7 +24908,7 @@ def SA1_clrt : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (p0) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_7c2dcd4d, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100110;
let isPredicated = 1;
let hasNewValue = 1;
@@ -24836,7 +24921,7 @@ def SA1_clrtnew : HInst<
(outs GeneralSubRegs:$Rd16),
(ins),
"if (p0.new) $Rd16 = #0",
-PSEUDO, TypeSUBINSN>, Enc_1451363 {
+tc_f26aa619, TypeSUBINSN>, Enc_1f5ba6 {
let Inst{12-4} = 0b110100100;
let isPredicated = 1;
let hasNewValue = 1;
@@ -24850,7 +24935,7 @@ def SA1_cmpeqi : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u2_0Imm:$Ii),
"p0 = cmp.eq($Rs16,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_2079016 {
+tc_e8c7a357, TypeSUBINSN>, Enc_63eaeb {
let Inst{3-2} = 0b00;
let Inst{12-8} = 0b11001;
let AsmVariantName = "NonParsable";
@@ -24861,7 +24946,7 @@ def SA1_combine0i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#0,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b00;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24873,7 +24958,7 @@ def SA1_combine1i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#1,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b01;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24885,7 +24970,7 @@ def SA1_combine2i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#2,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b10;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24897,7 +24982,7 @@ def SA1_combine3i : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u2_0Imm:$Ii),
"$Rdd8 = combine(#3,#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15946706 {
+tc_d2609065, TypeSUBINSN>, Enc_ed48be {
let Inst{4-3} = 0b11;
let Inst{12-7} = 0b111000;
let hasNewValue = 1;
@@ -24909,7 +24994,7 @@ def SA1_combinerz : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins GeneralSubRegs:$Rs16),
"$Rdd8 = combine($Rs16,#0)",
-PSEUDO, TypeSUBINSN>, Enc_10501894 {
+tc_d2609065, TypeSUBINSN>, Enc_399e12 {
let Inst{3-3} = 0b1;
let Inst{12-8} = 0b11101;
let hasNewValue = 1;
@@ -24921,7 +25006,7 @@ def SA1_combinezr : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins GeneralSubRegs:$Rs16),
"$Rdd8 = combine(#0,$Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_10501894 {
+tc_d2609065, TypeSUBINSN>, Enc_399e12 {
let Inst{3-3} = 0b0;
let Inst{12-8} = 0b11101;
let hasNewValue = 1;
@@ -24933,7 +25018,7 @@ def SA1_dec : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, n1Const:$n1),
"$Rd16 = add($Rs16,#$n1)",
-PSEUDO, TypeSUBINSN>, Enc_10597934 {
+tc_821c4233, TypeSUBINSN>, Enc_ee5ed0 {
let Inst{12-8} = 0b10011;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24944,7 +25029,7 @@ def SA1_inc : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = add($Rs16,#1)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10001;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24955,7 +25040,7 @@ def SA1_seti : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u32_0Imm:$Ii),
"$Rd16 = #$Ii",
-PSEUDO, TypeSUBINSN>, Enc_2176383 {
+tc_d2609065, TypeSUBINSN>, Enc_e39bb2 {
let Inst{12-10} = 0b010;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24971,7 +25056,7 @@ def SA1_setin1 : HInst<
(outs GeneralSubRegs:$Rd16),
(ins n1Const:$n1),
"$Rd16 = #$n1",
-PSEUDO, TypeSUBINSN>, Enc_13336212 {
+tc_d2609065, TypeSUBINSN>, Enc_7a0ea6 {
let Inst{12-4} = 0b110100000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24982,7 +25067,7 @@ def SA1_sxtb : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = sxtb($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10101;
let hasNewValue = 1;
let opNewValue = 0;
@@ -24993,7 +25078,7 @@ def SA1_sxth : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = sxth($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10100;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25004,7 +25089,7 @@ def SA1_tfr : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = $Rs16",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10000;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25015,7 +25100,7 @@ def SA1_zxtb : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = and($Rs16,#255)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10111;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25026,7 +25111,7 @@ def SA1_zxth : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16),
"$Rd16 = zxth($Rs16)",
-PSEUDO, TypeSUBINSN>, Enc_14939491 {
+tc_d2609065, TypeSUBINSN>, Enc_97d666 {
let Inst{12-8} = 0b10110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25037,7 +25122,7 @@ def SL1_loadri_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"$Rd16 = memw($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_13606251 {
+tc_bf6fa601, TypeSUBINSN>, Enc_53dca9 {
let Inst{12-12} = 0b0;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25051,7 +25136,7 @@ def SL1_loadrub_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"$Rd16 = memub($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_15606259 {
+tc_bf6fa601, TypeSUBINSN>, Enc_c175d0 {
let Inst{12-12} = 0b1;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25065,7 +25150,7 @@ def SL2_deallocframe : HInst<
(outs),
(ins),
"deallocframe",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_86442910, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111100000000;
let accessSize = DoubleWordAccess;
let AsmVariantName = "NonParsable";
@@ -25078,7 +25163,7 @@ def SL2_jumpr31 : HInst<
(outs),
(ins),
"jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000000;
let isTerminator = 1;
let isIndirectBranch = 1;
@@ -25093,7 +25178,7 @@ def SL2_jumpr31_f : HInst<
(outs),
(ins),
"if (!p0) jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25111,7 +25196,7 @@ def SL2_jumpr31_fnew : HInst<
(outs),
(ins),
"if (!p0.new) jumpr:nt r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25119,8 +25204,8 @@ let isTerminator = 1;
let isIndirectBranch = 1;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
+let isReturn = 1;
let Uses = [P0, R31];
let Defs = [PC];
let isTaken = Inst{4};
@@ -25130,7 +25215,7 @@ def SL2_jumpr31_t : HInst<
(outs),
(ins),
"if (p0) jumpr r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000100;
let isPredicated = 1;
let isTerminator = 1;
@@ -25147,15 +25232,15 @@ def SL2_jumpr31_tnew : HInst<
(outs),
(ins),
"if (p0.new) jumpr:nt r31",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_35fb9d13, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111111000110;
let isPredicated = 1;
let isTerminator = 1;
let isIndirectBranch = 1;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
+let isReturn = 1;
let Uses = [P0, R31];
let Defs = [PC];
let isTaken = Inst{4};
@@ -25165,7 +25250,7 @@ def SL2_loadrb_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_0Imm:$Ii),
"$Rd16 = memb($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_3135259 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2fbf3c {
let Inst{12-11} = 0b10;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25179,7 +25264,7 @@ def SL2_loadrd_sp : HInst<
(outs GeneralDoubleLow8Regs:$Rdd8),
(ins u5_3Imm:$Ii),
"$Rdd8 = memd(r29+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_16479122 {
+tc_70cabf66, TypeSUBINSN>, Enc_86a14b {
let Inst{12-8} = 0b11110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25194,7 +25279,7 @@ def SL2_loadrh_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii),
"$Rd16 = memh($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_4135257 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2bae10 {
let Inst{12-11} = 0b00;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25208,7 +25293,7 @@ def SL2_loadri_sp : HInst<
(outs GeneralSubRegs:$Rd16),
(ins u5_2Imm:$Ii),
"$Rd16 = memw(r29+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_64199 {
+tc_70cabf66, TypeSUBINSN>, Enc_51635c {
let Inst{12-9} = 0b1110;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25223,7 +25308,7 @@ def SL2_loadruh_io : HInst<
(outs GeneralSubRegs:$Rd16),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii),
"$Rd16 = memuh($Rs16+#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_4135257 {
+tc_bf6fa601, TypeSUBINSN>, Enc_2bae10 {
let Inst{12-11} = 0b01;
let hasNewValue = 1;
let opNewValue = 0;
@@ -25237,15 +25322,15 @@ def SL2_return : HInst<
(outs),
(ins),
"dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000000;
let isTerminator = 1;
let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [R30];
let Defs = [PC, R30, R29, R31];
let DecoderNamespace = "SUBINSN_L2";
@@ -25254,7 +25339,7 @@ def SL2_return_f : HInst<
(outs),
(ins),
"if (!p0) dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000101;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25263,8 +25348,8 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25274,7 +25359,7 @@ def SL2_return_fnew : HInst<
(outs),
(ins),
"if (!p0.new) dealloc_return:nt",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
@@ -25283,9 +25368,9 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25295,7 +25380,7 @@ def SL2_return_t : HInst<
(outs),
(ins),
"if (p0) dealloc_return",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000100;
let isPredicated = 1;
let isTerminator = 1;
@@ -25303,8 +25388,8 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25314,7 +25399,7 @@ def SL2_return_tnew : HInst<
(outs),
(ins),
"if (p0.new) dealloc_return:nt",
-PSEUDO, TypeSUBINSN>, Enc_0 {
+tc_95c54f8b, TypeSUBINSN>, Enc_e3b0c4 {
let Inst{12-0} = 0b1111101000110;
let isPredicated = 1;
let isTerminator = 1;
@@ -25322,9 +25407,9 @@ let isIndirectBranch = 1;
let accessSize = DoubleWordAccess;
let cofMax1 = 1;
let AsmVariantName = "NonParsable";
-let isReturn = 1;
let isPredicatedNew = 1;
let mayLoad = 1;
+let isReturn = 1;
let Uses = [P0, R30];
let Defs = [PC, R30, R29, R31];
let isTaken = Inst{4};
@@ -25334,7 +25419,7 @@ def SS1_storeb_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii, GeneralSubRegs:$Rt16),
"memb($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_13204995 {
+tc_53ee6546, TypeSUBINSN>, Enc_b38ffc {
let Inst{12-12} = 0b1;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25346,7 +25431,7 @@ def SS1_storew_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii, GeneralSubRegs:$Rt16),
"memw($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_11205051 {
+tc_53ee6546, TypeSUBINSN>, Enc_f55a0c {
let Inst{12-12} = 0b0;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25358,7 +25443,7 @@ def SS2_allocframe : HInst<
(outs),
(ins u5_3Imm:$Ii),
"allocframe(#$Ii)",
-PSEUDO, TypeSUBINSN>, Enc_7884306 {
+tc_f027ebe9, TypeSUBINSN>, Enc_6f70ca {
let Inst{3-0} = 0b0000;
let Inst{12-9} = 0b1110;
let addrMode = BaseImmOffset;
@@ -25373,7 +25458,7 @@ def SS2_storebi0 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"memb($Rs16+#$Ii) = #0",
-PSEUDO, TypeSUBINSN>, Enc_13536408 {
+tc_6c52d277, TypeSUBINSN>, Enc_84d359 {
let Inst{12-8} = 0b10010;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25385,7 +25470,7 @@ def SS2_storebi1 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_0Imm:$Ii),
"memb($Rs16+#$Ii) = #1",
-PSEUDO, TypeSUBINSN>, Enc_13536408 {
+tc_6c52d277, TypeSUBINSN>, Enc_84d359 {
let Inst{12-8} = 0b10011;
let addrMode = BaseImmOffset;
let accessSize = ByteAccess;
@@ -25397,7 +25482,7 @@ def SS2_stored_sp : HInst<
(outs),
(ins s6_3Imm:$Ii, GeneralDoubleLow8Regs:$Rtt8),
"memd(r29+#$Ii) = $Rtt8",
-PSEUDO, TypeSUBINSN>, Enc_9165078 {
+tc_c14739d5, TypeSUBINSN>, Enc_b8309d {
let Inst{12-9} = 0b0101;
let addrMode = BaseImmOffset;
let accessSize = DoubleWordAccess;
@@ -25410,7 +25495,7 @@ def SS2_storeh_io : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u3_1Imm:$Ii, GeneralSubRegs:$Rt16),
"memh($Rs16+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_1734121 {
+tc_53ee6546, TypeSUBINSN>, Enc_625deb {
let Inst{12-11} = 0b00;
let addrMode = BaseImmOffset;
let accessSize = HalfWordAccess;
@@ -25422,7 +25507,7 @@ def SS2_storew_sp : HInst<
(outs),
(ins u5_2Imm:$Ii, GeneralSubRegs:$Rt16),
"memw(r29+#$Ii) = $Rt16",
-PSEUDO, TypeSUBINSN>, Enc_6690615 {
+tc_c14739d5, TypeSUBINSN>, Enc_87c142 {
let Inst{12-9} = 0b0100;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25435,7 +25520,7 @@ def SS2_storewi0 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"memw($Rs16+#$Ii) = #0",
-PSEUDO, TypeSUBINSN>, Enc_15536400 {
+tc_6c52d277, TypeSUBINSN>, Enc_a6ce9c {
let Inst{12-8} = 0b10000;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25447,7 +25532,7 @@ def SS2_storewi1 : HInst<
(outs),
(ins GeneralSubRegs:$Rs16, u4_2Imm:$Ii),
"memw($Rs16+#$Ii) = #1",
-PSEUDO, TypeSUBINSN>, Enc_15536400 {
+tc_6c52d277, TypeSUBINSN>, Enc_a6ce9c {
let Inst{12-8} = 0b10001;
let addrMode = BaseImmOffset;
let accessSize = WordAccess;
@@ -25759,7 +25844,7 @@ def V6_extractw : HInst<
(outs IntRegs:$Rd32),
(ins VectorRegs:$Vu32, IntRegs:$Rs32),
"$Rd32 = vextract($Vu32,$Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_16601956, Requires<[HasV60T,UseHVX]> {
+tc_9777e6bf, TypeLD>, Enc_50e578, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10010010000;
@@ -25773,7 +25858,7 @@ def V6_extractw_128B : HInst<
(outs IntRegs:$Rd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rs32),
"$Rd32 = vextract($Vu32,$Rs32)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_16601956, Requires<[HasV60T,UseHVX]> {
+tc_9777e6bf, TypeLD>, Enc_50e578, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10010010000;
@@ -25851,6 +25936,144 @@ let isCodeGenOnly = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
+def V6_ldcnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldcpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldcpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.cur = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
def V6_ldnt0 : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
@@ -25874,6 +26097,144 @@ let isCodeGenOnly = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
+def V6_ldp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32 = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtnp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtnp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtnpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtnpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if (!$Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtp0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtp0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32)",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
+def V6_ldtpnt0 : HInst<
+(outs VectorRegs:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+}
+def V6_ldtpnt0_128B : HInst<
+(outs VectorRegs128B:$Vd32),
+(ins PredRegs:$Pv4, IntRegs:$Rt32),
+"if ($Pv4) $Vd32.tmp = vmem($Rt32):nt",
+PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> {
+let hasNewValue = 1;
+let opNewValue = 0;
+let isPseudo = 1;
+let isCodeGenOnly = 1;
+let DecoderNamespace = "EXT_mmvec";
+let isCodeGenOnly = 1;
+}
def V6_ldu0 : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
@@ -25922,7 +26283,7 @@ def V6_lvsplatb : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.b = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25933,7 +26294,7 @@ def V6_lvsplatb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.b = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000010;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25945,7 +26306,7 @@ def V6_lvsplath : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.h = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25956,7 +26317,7 @@ def V6_lvsplath_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32.h = vsplat($Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX>, Enc_a5ed8a, Requires<[HasV62T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001110;
let hasNewValue = 1;
@@ -25968,7 +26329,7 @@ def V6_lvsplatw : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32 = vsplat($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_9768377, Requires<[HasV60T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX_LATE>, Enc_a5ed8a, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -25979,7 +26340,7 @@ def V6_lvsplatw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32),
"$Vd32 = vsplat($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_9768377, Requires<[HasV60T,UseHVX]> {
+tc_6b78cf13, TypeCVI_VX_LATE>, Enc_a5ed8a, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b000000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -25991,7 +26352,7 @@ def V6_pred_and : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = and($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26004,7 +26365,7 @@ def V6_pred_and_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = and($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26018,7 +26379,7 @@ def V6_pred_and_n : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = and($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26031,7 +26392,7 @@ def V6_pred_and_n_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = and($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26045,7 +26406,7 @@ def V6_pred_not : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4),
"$Qd4 = not($Qs4)",
-CVI_VA, TypeCVI_VA>, Enc_4897205, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_bfbf03, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b0001111000000011;
@@ -26057,7 +26418,7 @@ def V6_pred_not_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4),
"$Qd4 = not($Qs4)",
-CVI_VA, TypeCVI_VA>, Enc_4897205, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_bfbf03, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-10} = 0b0000;
let Inst{31-16} = 0b0001111000000011;
@@ -26070,7 +26431,7 @@ def V6_pred_or : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = or($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26083,7 +26444,7 @@ def V6_pred_or_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = or($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26097,7 +26458,7 @@ def V6_pred_or_n : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = or($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26110,7 +26471,7 @@ def V6_pred_or_n_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = or($Qs4,!$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26124,7 +26485,7 @@ def V6_pred_scalar2 : HInst<
(outs VecPredRegs:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV60T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV60T,UseHVX]> {
let Inst{13-2} = 0b000000010001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26135,7 +26496,7 @@ def V6_pred_scalar2_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV60T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV60T,UseHVX]> {
let Inst{13-2} = 0b000000010001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26147,7 +26508,7 @@ def V6_pred_scalar2v2 : HInst<
(outs VecPredRegs:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq2($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV62T,UseHVX]> {
let Inst{13-2} = 0b000000010011;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26158,7 +26519,7 @@ def V6_pred_scalar2v2_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins IntRegs:$Rt32),
"$Qd4 = vsetq2($Rt32)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> {
+tc_4105d6b5, TypeCVI_VP>, Enc_7222b7, Requires<[HasV62T,UseHVX]> {
let Inst{13-2} = 0b000000010011;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -26170,7 +26531,7 @@ def V6_pred_xor : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4 = xor($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000011;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26183,7 +26544,7 @@ def V6_pred_xor_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4 = xor($Qs4,$Qt4)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000011;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26197,7 +26558,7 @@ def V6_shuffeqh : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26210,7 +26571,7 @@ def V6_shuffeqh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26224,7 +26585,7 @@ def V6_shuffeqw : HInst<
(outs VecPredRegs:$Qd4),
(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4),
"$Qd4.h = vshuffe($Qs4.w,$Qt4.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000111;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26237,7 +26598,7 @@ def V6_shuffeqw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4),
"$Qd4.h = vshuffe($Qs4.w,$Qt4.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_134437, Requires<[HasV62T,UseHVX]> {
let Inst{7-2} = 0b000111;
let Inst{13-10} = 0b0000;
let Inst{21-16} = 0b000011;
@@ -26540,7 +26901,7 @@ def V6_vL32Ub_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmemu($Rt32+#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_35e92f8e, TypeCVI_VM_VP_LDU>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26556,7 +26917,7 @@ def V6_vL32Ub_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmemu($Rt32+#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_35e92f8e, TypeCVI_VM_VP_LDU>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26573,7 +26934,7 @@ def V6_vL32Ub_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmemu($Rx32++#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26590,7 +26951,7 @@ def V6_vL32Ub_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmemu($Rx32++#$Ii)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26608,7 +26969,7 @@ def V6_vL32Ub_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmemu($Rx32++$Mu2)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26624,7 +26985,7 @@ def V6_vL32Ub_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmemu($Rx32++$Mu2)",
-CVI_VM_VP_LDU, TypeCVI_VM_VP_LDU>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_4fd8566e, TypeCVI_VM_VP_LDU>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26641,7 +27002,7 @@ def V6_vL32b_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26658,7 +27019,7 @@ def V6_vL32b_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26676,7 +27037,7 @@ def V6_vL32b_cur_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26693,7 +27054,7 @@ def V6_vL32b_cur_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -26711,7 +27072,7 @@ def V6_vL32b_cur_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26729,7 +27090,7 @@ def V6_vL32b_cur_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26748,7 +27109,7 @@ def V6_vL32b_cur_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26768,7 +27129,7 @@ def V6_vL32b_cur_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26789,7 +27150,7 @@ def V6_vL32b_cur_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26808,7 +27169,7 @@ def V6_vL32b_cur_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26828,7 +27189,7 @@ def V6_vL32b_cur_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26846,7 +27207,7 @@ def V6_vL32b_cur_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -26865,7 +27226,7 @@ def V6_vL32b_cur_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26882,7 +27243,7 @@ def V6_vL32b_cur_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -26900,7 +27261,7 @@ def V6_vL32b_cur_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26917,7 +27278,7 @@ def V6_vL32b_cur_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -26935,7 +27296,7 @@ def V6_vL32b_cur_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26954,7 +27315,7 @@ def V6_vL32b_cur_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -26974,7 +27335,7 @@ def V6_vL32b_cur_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -26992,7 +27353,7 @@ def V6_vL32b_cur_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27011,7 +27372,7 @@ def V6_vL32b_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -27028,7 +27389,7 @@ def V6_vL32b_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -27046,7 +27407,7 @@ def V6_vL32b_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -27065,7 +27426,7 @@ def V6_vL32b_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -27085,7 +27446,7 @@ def V6_vL32b_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27103,7 +27464,7 @@ def V6_vL32b_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -27122,7 +27483,7 @@ def V6_vL32b_nt_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27131,8 +27492,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
}
@@ -27140,7 +27501,7 @@ def V6_vL32b_nt_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27149,8 +27510,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27159,7 +27520,7 @@ def V6_vL32b_nt_cur_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27177,7 +27538,7 @@ def V6_vL32b_nt_cur_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_b712833a, TypeCVI_VM_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27196,7 +27557,7 @@ def V6_vL32b_nt_cur_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27215,7 +27576,7 @@ def V6_vL32b_nt_cur_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27235,7 +27596,7 @@ def V6_vL32b_nt_cur_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27256,7 +27617,7 @@ def V6_vL32b_nt_cur_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27278,7 +27639,7 @@ def V6_vL32b_nt_cur_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27298,7 +27659,7 @@ def V6_vL32b_nt_cur_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000101;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27319,7 +27680,7 @@ def V6_vL32b_nt_cur_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27338,7 +27699,7 @@ def V6_vL32b_nt_cur_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27358,7 +27719,7 @@ def V6_vL32b_nt_cur_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27376,7 +27737,7 @@ def V6_vL32b_nt_cur_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000001;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27395,7 +27756,7 @@ def V6_vL32b_nt_cur_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27413,7 +27774,7 @@ def V6_vL32b_nt_cur_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27432,7 +27793,7 @@ def V6_vL32b_nt_cur_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27452,7 +27813,7 @@ def V6_vL32b_nt_cur_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27473,7 +27834,7 @@ def V6_vL32b_nt_cur_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27492,7 +27853,7 @@ def V6_vL32b_nt_cur_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000100;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27512,7 +27873,7 @@ def V6_vL32b_nt_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27522,15 +27883,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27540,8 +27901,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27549,7 +27910,7 @@ def V6_vL32b_nt_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27560,8 +27921,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27569,7 +27930,7 @@ def V6_vL32b_nt_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27580,8 +27941,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27590,7 +27951,7 @@ def V6_vL32b_nt_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27600,8 +27961,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27609,7 +27970,7 @@ def V6_vL32b_nt_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000011;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27619,8 +27980,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27629,7 +27990,7 @@ def V6_vL32b_nt_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27638,8 +27999,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
@@ -27648,7 +28009,7 @@ def V6_vL32b_nt_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27657,8 +28018,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27668,7 +28029,7 @@ def V6_vL32b_nt_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27676,8 +28037,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
@@ -27686,7 +28047,7 @@ def V6_vL32b_nt_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -27694,8 +28055,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let isCVLoadable = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
@@ -27705,7 +28066,7 @@ def V6_vL32b_nt_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27714,15 +28075,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27731,8 +28092,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27740,7 +28101,7 @@ def V6_vL32b_nt_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27750,8 +28111,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27759,7 +28120,7 @@ def V6_vL32b_nt_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27769,8 +28130,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27779,7 +28140,7 @@ def V6_vL32b_nt_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27788,8 +28149,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27797,7 +28158,7 @@ def V6_vL32b_nt_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27806,8 +28167,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27816,7 +28177,7 @@ def V6_vL32b_nt_tmp_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27825,15 +28186,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000010;
@@ -27842,8 +28203,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27851,7 +28212,7 @@ def V6_vL32b_nt_tmp_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27861,15 +28222,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -27879,8 +28240,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -27888,7 +28249,7 @@ def V6_vL32b_nt_tmp_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27899,8 +28260,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27908,7 +28269,7 @@ def V6_vL32b_nt_tmp_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -27919,8 +28280,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27929,7 +28290,7 @@ def V6_vL32b_nt_tmp_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27939,8 +28300,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27948,7 +28309,7 @@ def V6_vL32b_nt_tmp_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -27958,8 +28319,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -27968,7 +28329,7 @@ def V6_vL32b_nt_tmp_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27977,8 +28338,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -27986,7 +28347,7 @@ def V6_vL32b_nt_tmp_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001010;
@@ -27995,8 +28356,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28005,7 +28366,7 @@ def V6_vL32b_nt_tmp_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -28013,8 +28374,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28022,7 +28383,7 @@ def V6_vL32b_nt_tmp_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011010;
let hasNewValue = 1;
@@ -28030,8 +28391,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28040,7 +28401,7 @@ def V6_vL32b_nt_tmp_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -28049,15 +28410,15 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vL32b_nt_tmp_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000110;
let isPredicated = 1;
@@ -28066,8 +28427,8 @@ let opNewValue = 0;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -28075,7 +28436,7 @@ def V6_vL32b_nt_tmp_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -28085,8 +28446,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28094,7 +28455,7 @@ def V6_vL32b_nt_tmp_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
@@ -28104,8 +28465,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28114,7 +28475,7 @@ def V6_vL32b_nt_tmp_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -28123,8 +28484,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector64Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -28132,7 +28493,7 @@ def V6_vL32b_nt_tmp_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011110;
let isPredicated = 1;
@@ -28141,8 +28502,8 @@ let opNewValue = 0;
let addrMode = PostInc;
let accessSize = Vector128Access;
let isCVLoad = 1;
-let isNonTemporal = 1;
let mayLoad = 1;
+let isNonTemporal = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -28151,7 +28512,7 @@ def V6_vL32b_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28169,7 +28530,7 @@ def V6_vL32b_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28188,7 +28549,7 @@ def V6_vL32b_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28205,7 +28566,7 @@ def V6_vL32b_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_eb669007, TypeCVI_VM_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28223,7 +28584,7 @@ def V6_vL32b_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28239,7 +28600,7 @@ def V6_vL32b_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_5cbf490b, TypeCVI_VM_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28256,7 +28617,7 @@ def V6_vL32b_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28274,7 +28635,7 @@ def V6_vL32b_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28293,7 +28654,7 @@ def V6_vL32b_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28310,7 +28671,7 @@ def V6_vL32b_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)",
-CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_da979fb3, TypeCVI_VM_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000010;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28328,7 +28689,7 @@ def V6_vL32b_tmp_ai : HInst<
(outs VectorRegs:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_1244745, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -28344,7 +28705,7 @@ def V6_vL32b_tmp_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins IntRegs:$Rt32, s4_0Imm:$Ii),
"$Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_8437395, Requires<[HasV60T,UseHVX]> {
+tc_77a4c701, TypeCVI_VM_TMP_LD>, Enc_f3f408, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000000;
@@ -28361,7 +28722,7 @@ def V6_vL32b_tmp_npred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28378,7 +28739,7 @@ def V6_vL32b_tmp_npred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28396,7 +28757,7 @@ def V6_vL32b_tmp_npred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28415,7 +28776,7 @@ def V6_vL32b_tmp_npred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28435,7 +28796,7 @@ def V6_vL32b_tmp_npred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28453,7 +28814,7 @@ def V6_vL32b_tmp_npred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28472,7 +28833,7 @@ def V6_vL32b_tmp_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_10039393, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28489,7 +28850,7 @@ def V6_vL32b_tmp_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii),
"$Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_11039423, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_a255dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001000;
@@ -28507,7 +28868,7 @@ def V6_vL32b_tmp_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28523,7 +28884,7 @@ def V6_vL32b_tmp_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2),
"$Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15949334, Requires<[HasV60T,UseHVX]> {
+tc_9c267309, TypeCVI_VM_TMP_LD>, Enc_2ebe3b, Requires<[HasV60T,UseHVX]> {
let Inst{12-5} = 0b00000010;
let Inst{31-21} = 0b00101011000;
let hasNewValue = 1;
@@ -28540,7 +28901,7 @@ def V6_vL32b_tmp_pred_ai : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28556,7 +28917,7 @@ def V6_vL32b_tmp_pred_ai_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> {
+tc_51cd3aab, TypeCVI_VM_TMP_LD>, Enc_8d8a30, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000100;
let isPredicated = 1;
@@ -28573,7 +28934,7 @@ def V6_vL32b_tmp_pred_pi : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28591,7 +28952,7 @@ def V6_vL32b_tmp_pred_pi_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_58a8bf, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -28610,7 +28971,7 @@ def V6_vL32b_tmp_pred_ppu : HInst<
(outs VectorRegs:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28627,7 +28988,7 @@ def V6_vL32b_tmp_pred_ppu_128B : HInst<
(outs VectorRegs128B:$Vd32, IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2),
"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)",
-CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> {
+tc_38208312, TypeCVI_VM_TMP_LD>, Enc_f8c1c4, Requires<[HasV62T,UseHVX]> {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011100;
let isPredicated = 1;
@@ -28645,7 +29006,7 @@ def V6_vS32Ub_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_354299ad, TypeCVI_VM_STU>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28660,7 +29021,7 @@ def V6_vS32Ub_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_354299ad, TypeCVI_VM_STU>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28676,7 +29037,7 @@ def V6_vS32Ub_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28691,7 +29052,7 @@ def V6_vS32Ub_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28707,7 +29068,7 @@ def V6_vS32Ub_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28724,7 +29085,7 @@ def V6_vS32Ub_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28742,7 +29103,7 @@ def V6_vS32Ub_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28758,7 +29119,7 @@ def V6_vS32Ub_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000111;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28775,7 +29136,7 @@ def V6_vS32Ub_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -28791,7 +29152,7 @@ def V6_vS32Ub_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b111;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -28808,7 +29169,7 @@ def V6_vS32Ub_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -28823,7 +29184,7 @@ def V6_vS32Ub_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_7fa82b08, TypeCVI_VM_STU>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000111;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -28839,7 +29200,7 @@ def V6_vS32Ub_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28853,7 +29214,7 @@ def V6_vS32Ub_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rt32+#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d642eff3, TypeCVI_VM_STU>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -28868,7 +29229,7 @@ def V6_vS32Ub_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28884,7 +29245,7 @@ def V6_vS32Ub_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rx32++#$Ii) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -28901,7 +29262,7 @@ def V6_vS32Ub_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28916,7 +29277,7 @@ def V6_vS32Ub_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmemu($Rx32++$Mu2) = $Vs32",
-CVI_VM_STU, TypeCVI_VM_STU>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_6fd9ad30, TypeCVI_VM_STU>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000110;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -28932,7 +29293,7 @@ def V6_vS32b_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28948,7 +29309,7 @@ def V6_vS32b_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28965,7 +29326,7 @@ def V6_vS32b_new_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_6608821, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -28984,7 +29345,7 @@ def V6_vS32b_new_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2152247, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000001;
@@ -29004,7 +29365,7 @@ def V6_vS32b_new_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29023,7 +29384,7 @@ def V6_vS32b_new_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29043,7 +29404,7 @@ def V6_vS32b_new_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29064,7 +29425,7 @@ def V6_vS32b_new_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29086,7 +29447,7 @@ def V6_vS32b_new_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001101;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29106,7 +29467,7 @@ def V6_vS32b_new_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001101;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29127,7 +29488,7 @@ def V6_vS32b_new_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_12244921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -29147,7 +29508,7 @@ def V6_vS32b_new_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_11244923, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -29168,7 +29529,7 @@ def V6_vS32b_new_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -29187,7 +29548,7 @@ def V6_vS32b_new_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -29207,7 +29568,7 @@ def V6_vS32b_new_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29225,7 +29586,7 @@ def V6_vS32b_new_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29244,7 +29605,7 @@ def V6_vS32b_new_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29264,7 +29625,7 @@ def V6_vS32b_new_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29285,7 +29646,7 @@ def V6_vS32b_new_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29304,7 +29665,7 @@ def V6_vS32b_new_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2) = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29324,7 +29685,7 @@ def V6_vS32b_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29340,7 +29701,7 @@ def V6_vS32b_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -29357,7 +29718,7 @@ def V6_vS32b_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29375,7 +29736,7 @@ def V6_vS32b_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -29394,7 +29755,7 @@ def V6_vS32b_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29411,7 +29772,7 @@ def V6_vS32b_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -29429,7 +29790,7 @@ def V6_vS32b_nqpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -29441,7 +29802,7 @@ def V6_vS32b_nqpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -29454,7 +29815,7 @@ def V6_vS32b_nqpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -29468,7 +29829,7 @@ def V6_vS32b_nqpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -29483,7 +29844,7 @@ def V6_vS32b_nqpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -29496,7 +29857,7 @@ def V6_vS32b_nqpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -29510,14 +29871,14 @@ def V6_vS32b_nt_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_6923828, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let isPredicable = 1;
@@ -29527,14 +29888,14 @@ def V6_vS32b_nt_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_5757366, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_e3748cdf, TypeCVI_VM_ST>, Enc_c9e3bc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -29545,7 +29906,7 @@ def V6_vS32b_nt_new_ai : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_6608821, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
@@ -29565,7 +29926,7 @@ def V6_vS32b_nt_new_ai_128B : HInst<
(outs),
(ins IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2152247, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_1b93bdc6, TypeCVI_VM_NEW_ST>, Enc_f77fbc, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{12-11} = 0b00;
let Inst{31-21} = 0b00101000011;
@@ -29586,7 +29947,7 @@ def V6_vS32b_nt_new_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29606,7 +29967,7 @@ def V6_vS32b_nt_new_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29627,7 +29988,7 @@ def V6_vS32b_nt_new_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29649,7 +30010,7 @@ def V6_vS32b_nt_new_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29672,7 +30033,7 @@ def V6_vS32b_nt_new_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001111;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29693,7 +30054,7 @@ def V6_vS32b_nt_new_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001111;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29715,7 +30076,7 @@ def V6_vS32b_nt_new_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_12244921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
@@ -29736,7 +30097,7 @@ def V6_vS32b_nt_new_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_11244923, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_1aaec1, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b00100;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
@@ -29758,7 +30119,7 @@ def V6_vS32b_nt_new_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
@@ -29778,7 +30139,7 @@ def V6_vS32b_nt_new_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_1589406, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_db5b9e2f, TypeCVI_VM_NEW_ST>, Enc_cf1927, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-3} = 0b0000000100;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
@@ -29799,7 +30160,7 @@ def V6_vS32b_nt_new_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_9372046, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29818,7 +30179,7 @@ def V6_vS32b_nt_new_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_13937564, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_d5090f3e, TypeCVI_VM_NEW_ST>, Enc_f7430e, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
@@ -29838,7 +30199,7 @@ def V6_vS32b_nt_new_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_3735566, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29859,7 +30220,7 @@ def V6_vS32b_nt_new_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_2735552, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_784502, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-3} = 0b01010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29881,7 +30242,7 @@ def V6_vS32b_nt_new_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001010;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29901,7 +30262,7 @@ def V6_vS32b_nt_new_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Os8),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Os8.new",
-CVI_VM_NEW_ST, TypeCVI_VM_NEW_ST>, Enc_8498433, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_8b6a873f, TypeCVI_VM_NEW_ST>, Enc_372c9d, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-3} = 0b00001010;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
@@ -29922,15 +30283,15 @@ def V6_vS32b_nt_npred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29939,15 +30300,15 @@ def V6_vS32b_nt_npred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29957,7 +30318,7 @@ def V6_vS32b_nt_npred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29965,8 +30326,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29976,7 +30337,7 @@ def V6_vS32b_nt_npred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
@@ -29984,8 +30345,8 @@ let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -29996,15 +30357,15 @@ def V6_vS32b_nt_npred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30014,15 +30375,15 @@ def V6_vS32b_nt_npred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let isPredicatedFalse = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30033,26 +30394,26 @@ def V6_vS32b_nt_nqpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vS32b_nt_nqpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -30060,14 +30421,14 @@ def V6_vS32b_nt_nqpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30075,14 +30436,14 @@ def V6_vS32b_nt_nqpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30091,13 +30452,13 @@ def V6_vS32b_nt_nqpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30105,13 +30466,13 @@ def V6_vS32b_nt_nqpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if (!$Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000001;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30120,14 +30481,14 @@ def V6_vS32b_nt_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30138,14 +30499,14 @@ def V6_vS32b_nt_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001011;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30157,13 +30518,13 @@ def V6_vS32b_nt_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30174,13 +30535,13 @@ def V6_vS32b_nt_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011011;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let isPredicable = 1;
@@ -30192,14 +30553,14 @@ def V6_vS32b_nt_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30208,14 +30569,14 @@ def V6_vS32b_nt_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000111;
let isPredicated = 1;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ai_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30225,15 +30586,15 @@ def V6_vS32b_nt_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30243,15 +30604,15 @@ def V6_vS32b_nt_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_pi_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30262,14 +30623,14 @@ def V6_vS32b_nt_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30279,14 +30640,14 @@ def V6_vS32b_nt_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011111;
let isPredicated = 1;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let BaseOpcode = "V6_vS32b_ppu_128B";
let isNVStorable = 1;
let DecoderNamespace = "EXT_mmvec";
@@ -30297,26 +30658,26 @@ def V6_vS32b_nt_qpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
}
def V6_vS32b_nt_qpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000110;
let addrMode = BaseImmOffset;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
}
@@ -30324,14 +30685,14 @@ def V6_vS32b_nt_qpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30339,14 +30700,14 @@ def V6_vS32b_nt_qpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30355,13 +30716,13 @@ def V6_vS32b_nt_qpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector64Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let Constraints = "$Rx32 = $Rx32in";
}
@@ -30369,13 +30730,13 @@ def V6_vS32b_nt_qpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2):nt = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011110;
let addrMode = PostInc;
let accessSize = Vector128Access;
-let mayStore = 1;
let isNonTemporal = 1;
+let mayStore = 1;
let DecoderNamespace = "EXT_mmvec";
let isCodeGenOnly = 1;
let Constraints = "$Rx32 = $Rx32in";
@@ -30384,7 +30745,7 @@ def V6_vS32b_pi : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_3296020, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -30401,7 +30762,7 @@ def V6_vS32b_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2296022, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_b62ef7, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b00101001001;
@@ -30419,7 +30780,7 @@ def V6_vS32b_ppu : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -30434,7 +30795,7 @@ def V6_vS32b_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_11281763, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_a4c9df3b, TypeCVI_VM_ST>, Enc_d15d19, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{12-5} = 0b00000000;
let Inst{31-21} = 0b00101011001;
let addrMode = PostInc;
@@ -30450,7 +30811,7 @@ def V6_vS32b_pred_ai : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_10075393, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -30465,7 +30826,7 @@ def V6_vS32b_pred_ai_128B : HInst<
(outs),
(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_9470751, Requires<[HasV60T,UseHVX]>, NewValueRel {
+tc_85d237e3, TypeCVI_VM_ST>, Enc_27b757, Requires<[HasV60T,UseHVX]>, NewValueRel {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000101;
let isPredicated = 1;
@@ -30481,7 +30842,7 @@ def V6_vS32b_pred_pi : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15459921, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -30498,7 +30859,7 @@ def V6_vS32b_pred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_14459927, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_865390, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001101;
@@ -30516,7 +30877,7 @@ def V6_vS32b_pred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -30531,7 +30892,7 @@ def V6_vS32b_pred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Pv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_15733946, Requires<[HasV60T,UseHVX]> {
+tc_0317c6ca, TypeCVI_VM_ST>, Enc_1ef990, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011101;
let isPredicated = 1;
@@ -30547,7 +30908,7 @@ def V6_vS32b_qpred_ai : HInst<
(outs),
(ins VecPredRegs:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_16279406, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -30559,7 +30920,7 @@ def V6_vS32b_qpred_ai_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rt32, s4_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rt32+#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_2703240, Requires<[HasV60T,UseHVX]> {
+tc_aedb9f9e, TypeCVI_VM_ST>, Enc_2ea740, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{31-21} = 0b00101000100;
let addrMode = BaseImmOffset;
@@ -30572,7 +30933,7 @@ def V6_vS32b_qpred_pi : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_12397062, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -30586,7 +30947,7 @@ def V6_vS32b_qpred_pi_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, s3_0Imm:$Ii, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++#$Ii) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13397056, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_0b51ce, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00101001100;
@@ -30601,7 +30962,7 @@ def V6_vS32b_qpred_ppu : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -30614,7 +30975,7 @@ def V6_vS32b_qpred_ppu_128B : HInst<
(outs IntRegs:$Rx32),
(ins VecPredRegs128B:$Qv4, IntRegs:$Rx32in, ModRegs:$Mu2, VectorRegs128B:$Vs32),
"if ($Qv4) vmem($Rx32++$Mu2) = $Vs32",
-CVI_VM_ST, TypeCVI_VM_ST>, Enc_13425035, Requires<[HasV60T,UseHVX]> {
+tc_99093773, TypeCVI_VM_ST>, Enc_4dff07, Requires<[HasV60T,UseHVX]> {
let Inst{10-5} = 0b000000;
let Inst{31-21} = 0b00101011100;
let addrMode = PostInc;
@@ -30628,7 +30989,7 @@ def V6_vabsdiffh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30640,7 +31001,7 @@ def V6_vabsdiffh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30676,7 +31037,7 @@ def V6_vabsdiffub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vabsdiff($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30688,7 +31049,7 @@ def V6_vabsdiffub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vabsdiff($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30724,7 +31085,7 @@ def V6_vabsdiffuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.uh,$Vv32.uh)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30736,7 +31097,7 @@ def V6_vabsdiffuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vabsdiff($Vu32.uh,$Vv32.uh)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30772,7 +31133,7 @@ def V6_vabsdiffw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vabsdiff($Vu32.w,$Vv32.w)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30784,7 +31145,7 @@ def V6_vabsdiffw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vabsdiff($Vu32.w,$Vv32.w)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -30820,7 +31181,7 @@ def V6_vabsh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vabs($Vu32.h)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30832,7 +31193,7 @@ def V6_vabsh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vabs($Vu32.h)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30868,7 +31229,7 @@ def V6_vabsh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vabs($Vu32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30880,7 +31241,7 @@ def V6_vabsh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vabs($Vu32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30916,7 +31277,7 @@ def V6_vabsw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vabs($Vu32.w)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30928,7 +31289,7 @@ def V6_vabsw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vabs($Vu32.w)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30964,7 +31325,7 @@ def V6_vabsw_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vabs($Vu32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -30976,7 +31337,7 @@ def V6_vabsw_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vabs($Vu32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -31012,7 +31373,7 @@ def V6_vaddb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31024,7 +31385,7 @@ def V6_vaddb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31060,7 +31421,7 @@ def V6_vaddb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31072,7 +31433,7 @@ def V6_vaddb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31108,7 +31469,7 @@ def V6_vaddbnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31123,7 +31484,7 @@ def V6_vaddbnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31166,7 +31527,7 @@ def V6_vaddbq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31181,7 +31542,7 @@ def V6_vaddbq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.b += $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31224,7 +31585,7 @@ def V6_vaddbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -31236,7 +31597,7 @@ def V6_vaddbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -31272,7 +31633,7 @@ def V6_vaddbsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -31284,7 +31645,7 @@ def V6_vaddbsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -31320,7 +31681,7 @@ def V6_vaddcarry : HInst<
(outs VectorRegs:$Vd32, VecPredRegs:$Qx4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in),
"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -31335,7 +31696,7 @@ def V6_vaddcarry_128B : HInst<
(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in),
"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -31351,7 +31712,7 @@ def V6_vaddclbh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31363,7 +31724,7 @@ def V6_vaddclbh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31376,7 +31737,7 @@ def V6_vaddclbw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31388,7 +31749,7 @@ def V6_vaddclbw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011111000;
@@ -31401,7 +31762,7 @@ def V6_vaddh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31413,7 +31774,7 @@ def V6_vaddh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -31449,7 +31810,7 @@ def V6_vaddh_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31461,7 +31822,7 @@ def V6_vaddh_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31497,7 +31858,7 @@ def V6_vaddhnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31512,7 +31873,7 @@ def V6_vaddhnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31555,7 +31916,7 @@ def V6_vaddhq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31570,7 +31931,7 @@ def V6_vaddhq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.h += $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -31613,7 +31974,7 @@ def V6_vaddhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31625,7 +31986,7 @@ def V6_vaddhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vadd($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31661,7 +32022,7 @@ def V6_vaddhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -31673,7 +32034,7 @@ def V6_vaddhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vadd($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -31709,7 +32070,7 @@ def V6_vaddhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31721,7 +32082,7 @@ def V6_vaddhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31734,7 +32095,7 @@ def V6_vaddhw_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -31748,7 +32109,7 @@ def V6_vaddhw_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vadd($Vu32.h,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -31813,7 +32174,7 @@ def V6_vaddubh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31825,7 +32186,7 @@ def V6_vaddubh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -31838,7 +32199,7 @@ def V6_vaddubh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -31852,7 +32213,7 @@ def V6_vaddubh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -31917,7 +32278,7 @@ def V6_vaddubsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31929,7 +32290,7 @@ def V6_vaddubsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -31965,7 +32326,7 @@ def V6_vaddubsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.ub = vadd($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -31977,7 +32338,7 @@ def V6_vaddubsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.ub = vadd($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32013,7 +32374,7 @@ def V6_vaddububb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32025,7 +32386,7 @@ def V6_vaddububb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32038,7 +32399,7 @@ def V6_vadduhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vadd($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32050,7 +32411,7 @@ def V6_vadduhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vadd($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32086,7 +32447,7 @@ def V6_vadduhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uh = vadd($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32098,7 +32459,7 @@ def V6_vadduhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uh = vadd($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32134,7 +32495,7 @@ def V6_vadduhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -32146,7 +32507,7 @@ def V6_vadduhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -32159,7 +32520,7 @@ def V6_vadduhw_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -32173,7 +32534,7 @@ def V6_vadduhw_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -32238,7 +32599,7 @@ def V6_vadduwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -32250,7 +32611,7 @@ def V6_vadduwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -32286,7 +32647,7 @@ def V6_vadduwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32298,7 +32659,7 @@ def V6_vadduwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -32334,7 +32695,7 @@ def V6_vaddw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32346,7 +32707,7 @@ def V6_vaddw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32382,7 +32743,7 @@ def V6_vaddw_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32394,7 +32755,7 @@ def V6_vaddw_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -32430,7 +32791,7 @@ def V6_vaddwnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32445,7 +32806,7 @@ def V6_vaddwnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32488,7 +32849,7 @@ def V6_vaddwq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32503,7 +32864,7 @@ def V6_vaddwq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.w += $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -32546,7 +32907,7 @@ def V6_vaddwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32558,7 +32919,7 @@ def V6_vaddwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vadd($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -32594,7 +32955,7 @@ def V6_vaddwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32606,7 +32967,7 @@ def V6_vaddwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vadd($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -32642,7 +33003,7 @@ def V6_valignb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = valign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -32654,7 +33015,7 @@ def V6_valignb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = valign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -32667,7 +33028,7 @@ def V6_valignbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32 = valign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -32678,7 +33039,7 @@ def V6_valignbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32 = valign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -32690,7 +33051,7 @@ def V6_vand : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vand($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -32702,7 +33063,7 @@ def V6_vand_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vand($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -32715,7 +33076,7 @@ def V6_vandnqrt : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX>, Enc_7b7ba8, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0001;
let Inst{31-21} = 0b00011001101;
@@ -32727,7 +33088,7 @@ def V6_vandnqrt_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX>, Enc_7b7ba8, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0001;
let Inst{31-21} = 0b00011001101;
@@ -32740,7 +33101,7 @@ def V6_vandnqrt_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX>, Enc_895bd9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b00011001011;
@@ -32754,7 +33115,7 @@ def V6_vandnqrt_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand(!$Qu4,$Rt32)",
-CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX>, Enc_895bd9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1001;
let Inst{31-21} = 0b00011001011;
@@ -32819,7 +33180,7 @@ def V6_vandqrt : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4711514, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_7b7ba8, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b00011001101;
@@ -32831,7 +33192,7 @@ def V6_vandqrt_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vd32 = vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4711514, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_7b7ba8, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-10} = 0b0000;
let Inst{31-21} = 0b00011001101;
@@ -32844,7 +33205,7 @@ def V6_vandqrt_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4944558, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_895bd9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b00011001011;
@@ -32858,7 +33219,7 @@ def V6_vandqrt_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32),
"$Vx32 |= vand($Qu4,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_4944558, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_895bd9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-10} = 0b1000;
let Inst{31-21} = 0b00011001011;
@@ -32923,7 +33284,7 @@ def V6_vandvnqv : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vu32),
"$Vd32 = vand(!$Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32936,7 +33297,7 @@ def V6_vandvnqv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32),
"$Vd32 = vand(!$Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32950,7 +33311,7 @@ def V6_vandvqv : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vu32),
"$Vd32 = vand($Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32963,7 +33324,7 @@ def V6_vandvqv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32),
"$Vd32 = vand($Qv4,$Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_c4dc92, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000011;
@@ -32977,7 +33338,7 @@ def V6_vandvrt : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Qd4 = vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_11498120, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_0f8bab, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -32989,7 +33350,7 @@ def V6_vandvrt_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Qd4 = vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_11498120, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_0f8bab, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -33002,7 +33363,7 @@ def V6_vandvrt_acc : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Qx4 |= vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_10612292, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_adf111, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33016,7 +33377,7 @@ def V6_vandvrt_acc_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Qx4 |= vand($Vu32,$Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_10612292, Requires<[HasV60T,UseHVX]> {
+tc_9311da3f, TypeCVI_VX_LATE>, Enc_adf111, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33081,7 +33442,7 @@ def V6_vaslh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasl($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -33093,7 +33454,7 @@ def V6_vaslh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasl($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -33129,7 +33490,7 @@ def V6_vaslhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vasl($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33141,7 +33502,7 @@ def V6_vaslhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vasl($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33177,7 +33538,7 @@ def V6_vaslw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33189,7 +33550,7 @@ def V6_vaslw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33202,7 +33563,7 @@ def V6_vaslw_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33216,7 +33577,7 @@ def V6_vaslw_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasl($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33281,7 +33642,7 @@ def V6_vaslwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vasl($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33293,7 +33654,7 @@ def V6_vaslwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vasl($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33329,7 +33690,7 @@ def V6_vasrh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasr($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33341,7 +33702,7 @@ def V6_vasrh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vasr($Vu32.h,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33377,7 +33738,7 @@ def V6_vasrhbrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -33389,7 +33750,7 @@ def V6_vasrhbrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -33402,7 +33763,7 @@ def V6_vasrhbrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhb($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33412,7 +33773,7 @@ def V6_vasrhbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33424,7 +33785,7 @@ def V6_vasrhbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33437,7 +33798,7 @@ def V6_vasrhubrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33449,7 +33810,7 @@ def V6_vasrhubrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33462,7 +33823,7 @@ def V6_vasrhubrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhub($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33472,7 +33833,7 @@ def V6_vasrhubsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33484,7 +33845,7 @@ def V6_vasrhubsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.ub = vasr($Vu32.h,$Vv32.h,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33497,7 +33858,7 @@ def V6_vasrhubsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrhub($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33507,7 +33868,7 @@ def V6_vasrhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vasr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33519,7 +33880,7 @@ def V6_vasrhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vasr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33555,7 +33916,7 @@ def V6_vasruwuhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33567,7 +33928,7 @@ def V6_vasruwuhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33580,7 +33941,7 @@ def V6_vasrw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33592,7 +33953,7 @@ def V6_vasrw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -33605,7 +33966,7 @@ def V6_vasrw_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33619,7 +33980,7 @@ def V6_vasrw_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vasr($Vu32.w,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_c00bf9c9, TypeCVI_VS>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -33684,7 +34045,7 @@ def V6_vasrwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8)",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33696,7 +34057,7 @@ def V6_vasrwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8)",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33709,7 +34070,7 @@ def V6_vasrwh_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8)",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33719,7 +34080,7 @@ def V6_vasrwhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33731,7 +34092,7 @@ def V6_vasrwhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33744,7 +34105,7 @@ def V6_vasrwhrndsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8):rnd:sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33754,7 +34115,7 @@ def V6_vasrwhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33766,7 +34127,7 @@ def V6_vasrwhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.h = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33779,7 +34140,7 @@ def V6_vasrwhsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwh($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33789,7 +34150,7 @@ def V6_vasrwuhrndsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33801,7 +34162,7 @@ def V6_vasrwuhrndsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -33814,7 +34175,7 @@ def V6_vasrwuhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33826,7 +34187,7 @@ def V6_vasrwuhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):sat",
-CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_7fa8b40f, TypeCVI_VS>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -33839,7 +34200,7 @@ def V6_vasrwuhsat_alt : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vasrwuh($Vu32,$Vv32,$Rt8):sat",
-PSEUDO, TypeMAPPING>, Requires<[HasV60T]> {
+tc_7fa8b40f, TypeMAPPING>, Requires<[HasV60T]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33849,7 +34210,7 @@ def V6_vasrwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vasr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33861,7 +34222,7 @@ def V6_vasrwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vasr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -33897,7 +34258,7 @@ def V6_vassign : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000011;
@@ -33909,7 +34270,7 @@ def V6_vassign_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000011;
@@ -33922,7 +34283,7 @@ def V6_vassignp : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32),
"$Vdd32 = $Vuu32",
-CVI_VA, TypeCVI_VA>, Requires<[HasV60T,UseHVX]> {
+CVI_VA, TypeCVI_VA_DV>, Requires<[HasV60T,UseHVX]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33932,7 +34293,7 @@ def V6_vassignp_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32),
"$Vdd32 = $Vuu32",
-CVI_VA, TypeCVI_VA>, Requires<[HasV60T,UseHVX]> {
+CVI_VA, TypeCVI_VA_DV>, Requires<[HasV60T,UseHVX]> {
let hasNewValue = 1;
let opNewValue = 0;
let isPseudo = 1;
@@ -33943,7 +34304,7 @@ def V6_vavgh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -33955,7 +34316,7 @@ def V6_vavgh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -33991,7 +34352,7 @@ def V6_vavghrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34003,7 +34364,7 @@ def V6_vavghrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vavg($Vu32.h,$Vv32.h):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34039,7 +34400,7 @@ def V6_vavgub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34051,7 +34412,7 @@ def V6_vavgub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34087,7 +34448,7 @@ def V6_vavgubrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34099,7 +34460,7 @@ def V6_vavgubrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vavg($Vu32.ub,$Vv32.ub):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34135,7 +34496,7 @@ def V6_vavguh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34147,7 +34508,7 @@ def V6_vavguh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34183,7 +34544,7 @@ def V6_vavguhrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34195,7 +34556,7 @@ def V6_vavguhrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vavg($Vu32.uh,$Vv32.uh):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34231,7 +34592,7 @@ def V6_vavgw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34243,7 +34604,7 @@ def V6_vavgw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100110;
@@ -34279,7 +34640,7 @@ def V6_vavgwrnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34291,7 +34652,7 @@ def V6_vavgwrnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vavg($Vu32.w,$Vv32.w):rnd",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -34327,7 +34688,7 @@ def V6_vccombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"if ($Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010011;
@@ -34340,7 +34701,7 @@ def V6_vccombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"if ($Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010011;
@@ -34354,7 +34715,7 @@ def V6_vcl0h : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.uh = vcl0($Vu32.uh)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34366,7 +34727,7 @@ def V6_vcl0h_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.uh = vcl0($Vu32.uh)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34402,7 +34763,7 @@ def V6_vcl0w : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.uw = vcl0($Vu32.uw)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34414,7 +34775,7 @@ def V6_vcl0w_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.uw = vcl0($Vu32.uw)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -34450,7 +34811,7 @@ def V6_vcmov : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32),
"if ($Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000000000;
@@ -34463,7 +34824,7 @@ def V6_vcmov_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32),
"if ($Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000000000;
@@ -34477,7 +34838,7 @@ def V6_vcombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -34490,7 +34851,7 @@ def V6_vcombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -34527,7 +34888,7 @@ def V6_vdeal : HInst<
(outs VectorRegs:$Vy32, VectorRegs:$Vx32),
(ins VectorRegs:$Vy32in, VectorRegs:$Vx32in, IntRegs:$Rt32),
"vdeal($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -34542,7 +34903,7 @@ def V6_vdeal_128B : HInst<
(outs VectorRegs128B:$Vy32, VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vy32in, VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"vdeal($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -34558,7 +34919,7 @@ def V6_vdealb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.b = vdeal($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34570,7 +34931,7 @@ def V6_vdealb4w : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vdeale($Vu32.b,$Vv32.b)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34582,7 +34943,7 @@ def V6_vdealb4w_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vdeale($Vu32.b,$Vv32.b)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34618,7 +34979,7 @@ def V6_vdealb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.b = vdeal($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34654,7 +35015,7 @@ def V6_vdealh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vdeal($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34666,7 +35027,7 @@ def V6_vdealh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vdeal($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -34702,7 +35063,7 @@ def V6_vdealvdd : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vdeal($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -34714,7 +35075,7 @@ def V6_vdealvdd_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vdeal($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -34727,7 +35088,7 @@ def V6_vdelta : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34739,7 +35100,7 @@ def V6_vdelta_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -34752,7 +35113,7 @@ def V6_vdmpybus : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34764,7 +35125,7 @@ def V6_vdmpybus_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34777,7 +35138,7 @@ def V6_vdmpybus_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34791,7 +35152,7 @@ def V6_vdmpybus_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vdmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34856,7 +35217,7 @@ def V6_vdmpybus_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34868,7 +35229,7 @@ def V6_vdmpybus_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34881,7 +35242,7 @@ def V6_vdmpybus_dv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34895,7 +35256,7 @@ def V6_vdmpybus_dv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vdmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34960,7 +35321,7 @@ def V6_vdmpyhb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34972,7 +35333,7 @@ def V6_vdmpyhb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -34985,7 +35346,7 @@ def V6_vdmpyhb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -34999,7 +35360,7 @@ def V6_vdmpyhb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -35064,7 +35425,7 @@ def V6_vdmpyhb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35076,7 +35437,7 @@ def V6_vdmpyhb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35089,7 +35450,7 @@ def V6_vdmpyhb_dv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35103,7 +35464,7 @@ def V6_vdmpyhb_dv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vdmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35168,7 +35529,7 @@ def V6_vdmpyhisat : HInst<
(outs VectorRegs:$Vd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35180,7 +35541,7 @@ def V6_vdmpyhisat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35193,7 +35554,7 @@ def V6_vdmpyhisat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35207,7 +35568,7 @@ def V6_vdmpyhisat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35272,7 +35633,7 @@ def V6_vdmpyhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35284,7 +35645,7 @@ def V6_vdmpyhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35297,7 +35658,7 @@ def V6_vdmpyhsat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35311,7 +35672,7 @@ def V6_vdmpyhsat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35376,7 +35737,7 @@ def V6_vdmpyhsuisat : HInst<
(outs VectorRegs:$Vd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35388,7 +35749,7 @@ def V6_vdmpyhsuisat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_36641, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_0e41fa, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35401,7 +35762,7 @@ def V6_vdmpyhsuisat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35415,7 +35776,7 @@ def V6_vdmpyhsuisat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vuu32.h,$Rt32.uh,#1):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5890213, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_cc857d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35480,7 +35841,7 @@ def V6_vdmpyhsusat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35492,7 +35853,7 @@ def V6_vdmpyhsusat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -35505,7 +35866,7 @@ def V6_vdmpyhsusat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35519,7 +35880,7 @@ def V6_vdmpyhsusat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vdmpy($Vu32.h,$Rt32.uh):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -35584,7 +35945,7 @@ def V6_vdmpyhvsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -35596,7 +35957,7 @@ def V6_vdmpyhvsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -35609,7 +35970,7 @@ def V6_vdmpyhvsat_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -35623,7 +35984,7 @@ def V6_vdmpyhvsat_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vdmpy($Vu32.h,$Vv32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -35688,7 +36049,7 @@ def V6_vdsaduh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.uw = vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -35700,7 +36061,7 @@ def V6_vdsaduh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.uw = vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -35713,7 +36074,7 @@ def V6_vdsaduh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.uw += vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -35727,7 +36088,7 @@ def V6_vdsaduh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.uw += vdsad($Vuu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -35792,7 +36153,7 @@ def V6_veqb : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35804,7 +36165,7 @@ def V6_veqb_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35817,7 +36178,7 @@ def V6_veqb_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35830,7 +36191,7 @@ def V6_veqb_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35844,7 +36205,7 @@ def V6_veqb_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35858,7 +36219,7 @@ def V6_veqb_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35873,7 +36234,7 @@ def V6_veqb_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35886,7 +36247,7 @@ def V6_veqb_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35900,7 +36261,7 @@ def V6_veqh : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35912,7 +36273,7 @@ def V6_veqh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -35925,7 +36286,7 @@ def V6_veqh_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35938,7 +36299,7 @@ def V6_veqh_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35952,7 +36313,7 @@ def V6_veqh_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35966,7 +36327,7 @@ def V6_veqh_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35981,7 +36342,7 @@ def V6_veqh_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -35994,7 +36355,7 @@ def V6_veqh_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36008,7 +36369,7 @@ def V6_veqw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36020,7 +36381,7 @@ def V6_veqw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36033,7 +36394,7 @@ def V6_veqw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36046,7 +36407,7 @@ def V6_veqw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36060,7 +36421,7 @@ def V6_veqw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36074,7 +36435,7 @@ def V6_veqw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36089,7 +36450,7 @@ def V6_veqw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36102,7 +36463,7 @@ def V6_veqw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.eq($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36116,7 +36477,7 @@ def V6_vgtb : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36128,7 +36489,7 @@ def V6_vgtb_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36141,7 +36502,7 @@ def V6_vgtb_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36154,7 +36515,7 @@ def V6_vgtb_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36168,7 +36529,7 @@ def V6_vgtb_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36182,7 +36543,7 @@ def V6_vgtb_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36197,7 +36558,7 @@ def V6_vgtb_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36210,7 +36571,7 @@ def V6_vgtb_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36224,7 +36585,7 @@ def V6_vgth : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36236,7 +36597,7 @@ def V6_vgth_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36249,7 +36610,7 @@ def V6_vgth_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36262,7 +36623,7 @@ def V6_vgth_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36276,7 +36637,7 @@ def V6_vgth_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36290,7 +36651,7 @@ def V6_vgth_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36305,7 +36666,7 @@ def V6_vgth_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36318,7 +36679,7 @@ def V6_vgth_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36332,7 +36693,7 @@ def V6_vgtub : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36344,7 +36705,7 @@ def V6_vgtub_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36357,7 +36718,7 @@ def V6_vgtub_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36370,7 +36731,7 @@ def V6_vgtub_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36384,7 +36745,7 @@ def V6_vgtub_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36398,7 +36759,7 @@ def V6_vgtub_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36413,7 +36774,7 @@ def V6_vgtub_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36426,7 +36787,7 @@ def V6_vgtub_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36440,7 +36801,7 @@ def V6_vgtuh : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36452,7 +36813,7 @@ def V6_vgtuh_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36465,7 +36826,7 @@ def V6_vgtuh_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36478,7 +36839,7 @@ def V6_vgtuh_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36492,7 +36853,7 @@ def V6_vgtuh_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36506,7 +36867,7 @@ def V6_vgtuh_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36521,7 +36882,7 @@ def V6_vgtuh_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36534,7 +36895,7 @@ def V6_vgtuh_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36548,7 +36909,7 @@ def V6_vgtuw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36560,7 +36921,7 @@ def V6_vgtuw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36573,7 +36934,7 @@ def V6_vgtuw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36586,7 +36947,7 @@ def V6_vgtuw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b001010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36600,7 +36961,7 @@ def V6_vgtuw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36614,7 +36975,7 @@ def V6_vgtuw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b011010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36629,7 +36990,7 @@ def V6_vgtuw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36642,7 +37003,7 @@ def V6_vgtuw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b101010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36656,7 +37017,7 @@ def V6_vgtw : HInst<
(outs VecPredRegs:$Qd4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qd4 = vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36668,7 +37029,7 @@ def V6_vgtw_128B : HInst<
(outs VecPredRegs128B:$Qd4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qd4 = vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_13983714, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_95441f, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111100;
@@ -36681,7 +37042,7 @@ def V6_vgtw_and : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36694,7 +37055,7 @@ def V6_vgtw_and_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 &= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b000110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36708,7 +37069,7 @@ def V6_vgtw_or : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36722,7 +37083,7 @@ def V6_vgtw_or_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 |= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b010110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36737,7 +37098,7 @@ def V6_vgtw_xor : HInst<
(outs VecPredRegs:$Qx4),
(ins VecPredRegs:$Qx4in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36750,7 +37111,7 @@ def V6_vgtw_xor_128B : HInst<
(outs VecPredRegs128B:$Qx4),
(ins VecPredRegs128B:$Qx4in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Qx4 ^= vcmp.gt($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_7470998, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_eaa9f8, Requires<[HasV60T,UseHVX]> {
let Inst{7-2} = 0b100110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100100;
@@ -36764,7 +37125,7 @@ def V6_vhist : HInst<
(outs),
(ins),
"vhist",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV60T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -36773,7 +37134,7 @@ def V6_vhist_128B : HInst<
(outs),
(ins),
"vhist",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV60T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -36783,7 +37144,7 @@ def V6_vhistq : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vhist($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV60T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -36793,7 +37154,7 @@ def V6_vhistq_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vhist($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV60T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV60T,UseHVX]> {
let Inst{13-0} = 0b10000010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -36804,7 +37165,7 @@ def V6_vinsertwr : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, IntRegs:$Rt32),
"$Vx32.w = vinsert($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_313333, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_569cfe, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b100000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -36816,7 +37177,7 @@ def V6_vinsertwr_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"$Vx32.w = vinsert($Rt32)",
-CVI_VX_LATE, TypeCVI_VX>, Enc_313333, Requires<[HasV60T,UseHVX]> {
+tc_e231aa4f, TypeCVI_VX_LATE>, Enc_569cfe, Requires<[HasV60T,UseHVX]> {
let Inst{13-5} = 0b100000001;
let Inst{31-21} = 0b00011001101;
let hasNewValue = 1;
@@ -36829,7 +37190,7 @@ def V6_vlalignb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vlalign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -36841,7 +37202,7 @@ def V6_vlalignb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32 = vlalign($Vu32,$Vv32,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011011;
@@ -36854,7 +37215,7 @@ def V6_vlalignbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32 = vlalign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -36865,7 +37226,7 @@ def V6_vlalignbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32 = vlalign($Vu32,$Vv32,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV60T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -36877,7 +37238,7 @@ def V6_vlsrb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.ub = vlsr($Vu32.ub,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36889,7 +37250,7 @@ def V6_vlsrb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.ub = vlsr($Vu32.ub,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36902,7 +37263,7 @@ def V6_vlsrh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uh = vlsr($Vu32.uh,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36914,7 +37275,7 @@ def V6_vlsrh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uh = vlsr($Vu32.uh,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -36950,7 +37311,7 @@ def V6_vlsrhv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vlsr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -36962,7 +37323,7 @@ def V6_vlsrhv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vlsr($Vu32.h,$Vv32.h)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -36998,7 +37359,7 @@ def V6_vlsrw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vlsr($Vu32.uw,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -37010,7 +37371,7 @@ def V6_vlsrw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vlsr($Vu32.uw,$Rt32)",
-CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_41f4b64e, TypeCVI_VS>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -37046,7 +37407,7 @@ def V6_vlsrwv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vlsr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -37058,7 +37419,7 @@ def V6_vlsrwv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vlsr($Vu32.w,$Vv32.w)",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111101;
@@ -37094,7 +37455,7 @@ def V6_vlutvvb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37106,7 +37467,7 @@ def V6_vlutvvb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV60T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37119,7 +37480,7 @@ def V6_vlutvvb_nm : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37131,7 +37492,7 @@ def V6_vlutvvb_nm_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch",
-CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_a30110, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37144,7 +37505,7 @@ def V6_vlutvvb_oracc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8877260, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_245865, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37158,7 +37519,7 @@ def V6_vlutvvb_oracc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8877260, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_245865, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37173,7 +37534,7 @@ def V6_vlutvvb_oracci : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_cd4705, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100110;
let hasNewValue = 1;
@@ -37186,7 +37547,7 @@ def V6_vlutvvb_oracci_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_cd4705, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100110;
let hasNewValue = 1;
@@ -37200,7 +37561,7 @@ def V6_vlutvvbi : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -37211,7 +37572,7 @@ def V6_vlutvvbi_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)",
-CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> {
+tc_c4b515c5, TypeCVI_VP>, Enc_0b2e5b, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110001;
let hasNewValue = 1;
@@ -37223,7 +37584,7 @@ def V6_vlutvwh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37235,7 +37596,7 @@ def V6_vlutvwh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37248,7 +37609,7 @@ def V6_vlutvwh_nm : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37260,7 +37621,7 @@ def V6_vlutvwh_nm_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-24} = 0b00011000;
@@ -37273,7 +37634,7 @@ def V6_vlutvwh_oracc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_16213761, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_7b523d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37287,7 +37648,7 @@ def V6_vlutvwh_oracc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_16213761, Requires<[HasV60T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_7b523d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -37302,7 +37663,7 @@ def V6_vlutvwh_oracci : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_1178da, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100111;
let hasNewValue = 1;
@@ -37315,7 +37676,7 @@ def V6_vlutvwh_oracci_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> {
+tc_cbf6d1dc, TypeCVI_VP_VS>, Enc_1178da, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100111;
let hasNewValue = 1;
@@ -37329,7 +37690,7 @@ def V6_vlutvwhi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_4b39e4, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -37340,7 +37701,7 @@ def V6_vlutvwhi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii),
"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_4b39e4, Requires<[HasV62T,UseHVX]> {
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110011;
let hasNewValue = 1;
@@ -37352,7 +37713,7 @@ def V6_vmaxb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vmax($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37364,7 +37725,7 @@ def V6_vmaxb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vmax($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37400,7 +37761,7 @@ def V6_vmaxh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmax($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37412,7 +37773,7 @@ def V6_vmaxh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmax($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37448,7 +37809,7 @@ def V6_vmaxub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vmax($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37460,7 +37821,7 @@ def V6_vmaxub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vmax($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37496,7 +37857,7 @@ def V6_vmaxuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vmax($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37508,7 +37869,7 @@ def V6_vmaxuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vmax($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37544,7 +37905,7 @@ def V6_vmaxw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmax($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37556,7 +37917,7 @@ def V6_vmaxw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmax($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37592,7 +37953,7 @@ def V6_vminb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vmin($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37604,7 +37965,7 @@ def V6_vminb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vmin($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -37640,7 +38001,7 @@ def V6_vminh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmin($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37652,7 +38013,7 @@ def V6_vminh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmin($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37688,7 +38049,7 @@ def V6_vminub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vmin($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37700,7 +38061,7 @@ def V6_vminub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vmin($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37736,7 +38097,7 @@ def V6_vminuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vmin($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37748,7 +38109,7 @@ def V6_vminuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vmin($Vu32.uh,$Vv32.uh)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37784,7 +38145,7 @@ def V6_vminw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmin($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37796,7 +38157,7 @@ def V6_vminw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmin($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111000;
@@ -37832,7 +38193,7 @@ def V6_vmpabus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -37844,7 +38205,7 @@ def V6_vmpabus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -37857,7 +38218,7 @@ def V6_vmpabus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -37871,7 +38232,7 @@ def V6_vmpabus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vmpa($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -37936,7 +38297,7 @@ def V6_vmpabusv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -37948,7 +38309,7 @@ def V6_vmpabusv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -37984,7 +38345,7 @@ def V6_vmpabuuv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -37996,7 +38357,7 @@ def V6_vmpabuuv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vmpa($Vuu32.ub,$Vvv32.ub)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -38032,7 +38393,7 @@ def V6_vmpahb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38044,7 +38405,7 @@ def V6_vmpahb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38057,7 +38418,7 @@ def V6_vmpahb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38071,7 +38432,7 @@ def V6_vmpahb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38136,7 +38497,7 @@ def V6_vmpauhb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -38148,7 +38509,7 @@ def V6_vmpauhb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -38161,7 +38522,7 @@ def V6_vmpauhb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -38175,7 +38536,7 @@ def V6_vmpauhb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -38240,7 +38601,7 @@ def V6_vmpybus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.h = vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38252,7 +38613,7 @@ def V6_vmpybus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.h = vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001001;
@@ -38265,7 +38626,7 @@ def V6_vmpybus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.h += vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38279,7 +38640,7 @@ def V6_vmpybus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.h += vmpy($Vu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001001;
@@ -38344,7 +38705,7 @@ def V6_vmpybusv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38356,7 +38717,7 @@ def V6_vmpybusv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38369,7 +38730,7 @@ def V6_vmpybusv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38383,7 +38744,7 @@ def V6_vmpybusv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38448,7 +38809,7 @@ def V6_vmpybv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38460,7 +38821,7 @@ def V6_vmpybv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38473,7 +38834,7 @@ def V6_vmpybv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.h += vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38487,7 +38848,7 @@ def V6_vmpybv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.h += vmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38552,7 +38913,7 @@ def V6_vmpyewuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -38564,7 +38925,7 @@ def V6_vmpyewuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -38577,7 +38938,7 @@ def V6_vmpyewuh_64 : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -38589,7 +38950,7 @@ def V6_vmpyewuh_64_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -38625,7 +38986,7 @@ def V6_vmpyh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.w = vmpy($Vu32.h,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38637,7 +38998,7 @@ def V6_vmpyh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.w = vmpy($Vu32.h,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38673,7 +39034,7 @@ def V6_vmpyhsat_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.w += vmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -38687,7 +39048,7 @@ def V6_vmpyhsat_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.w += vmpy($Vu32.h,$Rt32.h):sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -38729,7 +39090,7 @@ def V6_vmpyhsrs : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38741,7 +39102,7 @@ def V6_vmpyhsrs_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38777,7 +39138,7 @@ def V6_vmpyhss : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38789,7 +39150,7 @@ def V6_vmpyhss_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpy($Vu32.h,$Rt32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -38825,7 +39186,7 @@ def V6_vmpyhus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -38837,7 +39198,7 @@ def V6_vmpyhus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -38850,7 +39211,7 @@ def V6_vmpyhus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -38864,7 +39225,7 @@ def V6_vmpyhus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -38929,7 +39290,7 @@ def V6_vmpyhv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38941,7 +39302,7 @@ def V6_vmpyhv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -38954,7 +39315,7 @@ def V6_vmpyhv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -38968,7 +39329,7 @@ def V6_vmpyhv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.w += vmpy($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -39033,7 +39394,7 @@ def V6_vmpyhvsrs : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmpy($Vu32.h,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39045,7 +39406,7 @@ def V6_vmpyhvsrs_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmpy($Vu32.h,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39081,7 +39442,7 @@ def V6_vmpyieoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyieo($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -39093,7 +39454,7 @@ def V6_vmpyieoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyieo($Vu32.h,$Vv32.h)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -39106,7 +39467,7 @@ def V6_vmpyiewh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -39120,7 +39481,7 @@ def V6_vmpyiewh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100010;
@@ -39162,7 +39523,7 @@ def V6_vmpyiewuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39174,7 +39535,7 @@ def V6_vmpyiewuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39187,7 +39548,7 @@ def V6_vmpyiewuh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39201,7 +39562,7 @@ def V6_vmpyiewuh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyie($Vu32.w,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39266,7 +39627,7 @@ def V6_vmpyih : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39278,7 +39639,7 @@ def V6_vmpyih_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -39291,7 +39652,7 @@ def V6_vmpyih_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.h += vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39305,7 +39666,7 @@ def V6_vmpyih_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.h += vmpyi($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39370,7 +39731,7 @@ def V6_vmpyihb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -39382,7 +39743,7 @@ def V6_vmpyihb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.h = vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -39395,7 +39756,7 @@ def V6_vmpyihb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -39409,7 +39770,7 @@ def V6_vmpyihb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.h += vmpyi($Vu32.h,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -39474,7 +39835,7 @@ def V6_vmpyiowh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyio($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39486,7 +39847,7 @@ def V6_vmpyiowh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyio($Vu32.w,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -39522,7 +39883,7 @@ def V6_vmpyiwb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -39534,7 +39895,7 @@ def V6_vmpyiwb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -39547,7 +39908,7 @@ def V6_vmpyiwb_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39561,7 +39922,7 @@ def V6_vmpyiwb_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39626,7 +39987,7 @@ def V6_vmpyiwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39638,7 +39999,7 @@ def V6_vmpyiwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39651,7 +40012,7 @@ def V6_vmpyiwh_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39665,7 +40026,7 @@ def V6_vmpyiwh_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -39730,7 +40091,7 @@ def V6_vmpyiwub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39742,7 +40103,7 @@ def V6_vmpyiwub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001100;
@@ -39755,7 +40116,7 @@ def V6_vmpyiwub_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -39769,7 +40130,7 @@ def V6_vmpyiwub_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)",
-CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -39834,7 +40195,7 @@ def V6_vmpyowh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -39846,7 +40207,7 @@ def V6_vmpyowh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -39859,7 +40220,7 @@ def V6_vmpyowh_64_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39873,7 +40234,7 @@ def V6_vmpyowh_64_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39911,7 +40272,7 @@ def V6_vmpyowh_rnd : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -39923,7 +40284,7 @@ def V6_vmpyowh_rnd_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -39959,7 +40320,7 @@ def V6_vmpyowh_rnd_sacc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -39973,7 +40334,7 @@ def V6_vmpyowh_rnd_sacc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:rnd:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40013,7 +40374,7 @@ def V6_vmpyowh_sacc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40027,7 +40388,7 @@ def V6_vmpyowh_sacc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vmpyo($Vu32.w,$Vv32.h):<<1:sat:shift",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40067,7 +40428,7 @@ def V6_vmpyub : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.uh = vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001110;
@@ -40079,7 +40440,7 @@ def V6_vmpyub_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.uh = vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001110;
@@ -40092,7 +40453,7 @@ def V6_vmpyub_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.uh += vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -40106,7 +40467,7 @@ def V6_vmpyub_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.uh += vmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001100;
@@ -40171,7 +40532,7 @@ def V6_vmpyubv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.uh = vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -40183,7 +40544,7 @@ def V6_vmpyubv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.uh = vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -40196,7 +40557,7 @@ def V6_vmpyubv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.uh += vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -40210,7 +40571,7 @@ def V6_vmpyubv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.uh += vmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -40275,7 +40636,7 @@ def V6_vmpyuh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vdd32.uw = vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -40287,7 +40648,7 @@ def V6_vmpyuh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vdd32.uw = vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_11471622, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_01d3d0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -40300,7 +40661,7 @@ def V6_vmpyuh_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vxx32.uw += vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -40314,7 +40675,7 @@ def V6_vmpyuh_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vxx32.uw += vmpy($Vu32.uh,$Rt32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2153798, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_5e8512, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -40379,7 +40740,7 @@ def V6_vmpyuhv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.uw = vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40391,7 +40752,7 @@ def V6_vmpyuhv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.uw = vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40404,7 +40765,7 @@ def V6_vmpyuhv_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vxx32.uw += vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40418,7 +40779,7 @@ def V6_vmpyuhv_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vxx32.uw += vmpy($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_3fc427, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100001;
@@ -40483,7 +40844,7 @@ def V6_vmux : HInst<
(outs VectorRegs:$Vd32),
(ins VecPredRegs:$Qt4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vmux($Qt4,$Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_1572239, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_31db33, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110111;
@@ -40495,7 +40856,7 @@ def V6_vmux_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VecPredRegs128B:$Qt4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vmux($Qt4,$Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_1572239, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_31db33, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110111;
@@ -40508,7 +40869,7 @@ def V6_vnavgh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vnavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40520,7 +40881,7 @@ def V6_vnavgh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vnavg($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40556,7 +40917,7 @@ def V6_vnavgub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vnavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40568,7 +40929,7 @@ def V6_vnavgub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vnavg($Vu32.ub,$Vv32.ub)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40604,7 +40965,7 @@ def V6_vnavgw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vnavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40616,7 +40977,7 @@ def V6_vnavgw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vnavg($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100111;
@@ -40652,7 +41013,7 @@ def V6_vnccombine : HInst<
(outs VecDblRegs:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"if (!$Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010010;
@@ -40666,7 +41027,7 @@ def V6_vnccombine_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"if (!$Ps4) $Vdd32 = vcombine($Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_16145290, Requires<[HasV60T,UseHVX]> {
+tc_2171ebae, TypeCVI_VA_DV>, Enc_8c2412, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011010010;
@@ -40681,7 +41042,7 @@ def V6_vncmov : HInst<
(outs VectorRegs:$Vd32),
(ins PredRegs:$Ps4, VectorRegs:$Vu32),
"if (!$Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000100000;
@@ -40695,7 +41056,7 @@ def V6_vncmov_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins PredRegs:$Ps4, VectorRegs128B:$Vu32),
"if (!$Ps4) $Vd32 = $Vu32",
-CVI_VA, TypeCVI_VA>, Enc_12023037, Requires<[HasV60T,UseHVX]> {
+tc_b06ab583, TypeCVI_VA>, Enc_770858, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001101000100000;
@@ -40710,7 +41071,7 @@ def V6_vnormamth : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vnormamt($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40722,7 +41083,7 @@ def V6_vnormamth_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vnormamt($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40758,7 +41119,7 @@ def V6_vnormamtw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.w = vnormamt($Vu32.w)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40770,7 +41131,7 @@ def V6_vnormamtw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.w = vnormamt($Vu32.w)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000011;
@@ -40806,7 +41167,7 @@ def V6_vnot : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32 = vnot($Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -40818,7 +41179,7 @@ def V6_vnot_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32 = vnot($Vu32)",
-CVI_VA, TypeCVI_VA>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_71337255, TypeCVI_VA>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000000;
@@ -40831,7 +41192,7 @@ def V6_vor : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40843,7 +41204,7 @@ def V6_vor_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -40856,7 +41217,7 @@ def V6_vpackeb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpacke($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40868,7 +41229,7 @@ def V6_vpackeb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpacke($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40904,7 +41265,7 @@ def V6_vpackeh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpacke($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40916,7 +41277,7 @@ def V6_vpackeh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpacke($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40952,7 +41313,7 @@ def V6_vpackhb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -40964,7 +41325,7 @@ def V6_vpackhb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41000,7 +41361,7 @@ def V6_vpackhub_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41012,7 +41373,7 @@ def V6_vpackhub_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vpack($Vu32.h,$Vv32.h):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41048,7 +41409,7 @@ def V6_vpackob : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vpacko($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41060,7 +41421,7 @@ def V6_vpackob_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vpacko($Vu32.h,$Vv32.h)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41096,7 +41457,7 @@ def V6_vpackoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpacko($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41108,7 +41469,7 @@ def V6_vpackoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpacko($Vu32.w,$Vv32.w)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41144,7 +41505,7 @@ def V6_vpackwh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41156,7 +41517,7 @@ def V6_vpackwh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -41192,7 +41553,7 @@ def V6_vpackwuh_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41204,7 +41565,7 @@ def V6_vpackwuh_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vpack($Vu32.w,$Vv32.w):sat",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -41240,7 +41601,7 @@ def V6_vpopcounth : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vpopcount($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -41252,7 +41613,7 @@ def V6_vpopcounth_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vpopcount($Vu32.h)",
-CVI_VS, TypeCVI_VS>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_d2cb81ea, TypeCVI_VS>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -41288,7 +41649,7 @@ def V6_vrdelta : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vrdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -41300,7 +41661,7 @@ def V6_vrdelta_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vrdelta($Vu32,$Vv32)",
-CVI_VP, TypeCVI_VP>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_f3fc3f83, TypeCVI_VP>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -41313,7 +41674,7 @@ def V6_vrmpybus : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41325,7 +41686,7 @@ def V6_vrmpybus_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.w = vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41338,7 +41699,7 @@ def V6_vrmpybus_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41352,7 +41713,7 @@ def V6_vrmpybus_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.w += vrmpy($Vu32.ub,$Rt32.b)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41417,7 +41778,7 @@ def V6_vrmpybusi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.w = vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -41429,7 +41790,7 @@ def V6_vrmpybusi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.w = vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -41442,7 +41803,7 @@ def V6_vrmpybusi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.w += vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -41456,7 +41817,7 @@ def V6_vrmpybusi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.w += vrmpy($Vuu32.ub,$Rt32.b,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b10;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -41521,7 +41882,7 @@ def V6_vrmpybusv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41533,7 +41894,7 @@ def V6_vrmpybusv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41546,7 +41907,7 @@ def V6_vrmpybusv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41560,7 +41921,7 @@ def V6_vrmpybusv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vrmpy($Vu32.ub,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41625,7 +41986,7 @@ def V6_vrmpybv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41637,7 +41998,7 @@ def V6_vrmpybv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41650,7 +42011,7 @@ def V6_vrmpybv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.w += vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41664,7 +42025,7 @@ def V6_vrmpybv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.w += vrmpy($Vu32.b,$Vv32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41729,7 +42090,7 @@ def V6_vrmpyub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41741,7 +42102,7 @@ def V6_vrmpyub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32.uw = vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_69b6dd20, TypeCVI_VX>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -41754,7 +42115,7 @@ def V6_vrmpyub_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vx32.uw += vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41768,7 +42129,7 @@ def V6_vrmpyub_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vx32.uw += vrmpy($Vu32.ub,$Rt32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_10058269, Requires<[HasV60T,UseHVX]> {
+tc_d725e5b0, TypeCVI_VX>, Enc_5138b3, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -41833,7 +42194,7 @@ def V6_vrmpyubi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -41845,7 +42206,7 @@ def V6_vrmpyubi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -41858,7 +42219,7 @@ def V6_vrmpyubi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -41872,7 +42233,7 @@ def V6_vrmpyubi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrmpy($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001011;
@@ -41937,7 +42298,7 @@ def V6_vrmpyubv : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41949,7 +42310,7 @@ def V6_vrmpyubv_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX, TypeCVI_VX>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_908a4c8c, TypeCVI_VX>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100000;
@@ -41962,7 +42323,7 @@ def V6_vrmpyubv_acc : HInst<
(outs VectorRegs:$Vx32),
(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vx32.uw += vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -41976,7 +42337,7 @@ def V6_vrmpyubv_acc_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vx32.uw += vrmpy($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_2328527, Requires<[HasV60T,UseHVX]> {
+tc_e172d86a, TypeCVI_VX_DV>, Enc_a7341a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100000;
@@ -42041,7 +42402,7 @@ def V6_vror : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, IntRegs:$Rt32),
"$Vd32 = vror($Vu32,$Rt32)",
-CVI_VP, TypeCVI_VP>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_bf142ae2, TypeCVI_VP>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -42053,7 +42414,7 @@ def V6_vror_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, IntRegs:$Rt32),
"$Vd32 = vror($Vu32,$Rt32)",
-CVI_VP, TypeCVI_VP>, Enc_16214129, Requires<[HasV60T,UseHVX]> {
+tc_bf142ae2, TypeCVI_VP>, Enc_b087ac, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001011;
@@ -42066,7 +42427,7 @@ def V6_vroundhb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42078,7 +42439,7 @@ def V6_vroundhb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42114,7 +42475,7 @@ def V6_vroundhub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42126,7 +42487,7 @@ def V6_vroundhub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vround($Vu32.h,$Vv32.h):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42162,7 +42523,7 @@ def V6_vrounduhub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vround($Vu32.uh,$Vv32.uh):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42174,7 +42535,7 @@ def V6_vrounduhub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vround($Vu32.uh,$Vv32.uh):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42210,7 +42571,7 @@ def V6_vrounduwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42222,7 +42583,7 @@ def V6_vrounduwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111111;
@@ -42258,7 +42619,7 @@ def V6_vroundwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42270,7 +42631,7 @@ def V6_vroundwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42306,7 +42667,7 @@ def V6_vroundwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42318,7 +42679,7 @@ def V6_vroundwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vround($Vu32.w,$Vv32.w):sat",
-CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_45453b98, TypeCVI_VS>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42354,7 +42715,7 @@ def V6_vrsadubi : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -42366,7 +42727,7 @@ def V6_vrsadubi_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vdd32.uw = vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_14172170, Requires<[HasV60T,UseHVX]> {
+tc_7e9f581b, TypeCVI_VX_DV>, Enc_2f2f04, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001010;
@@ -42379,7 +42740,7 @@ def V6_vrsadubi_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -42393,7 +42754,7 @@ def V6_vrsadubi_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii),
"$Vxx32.uw += vrsad($Vuu32.ub,$Rt32.ub,#$Ii)",
-CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_13189194, Requires<[HasV60T,UseHVX]> {
+tc_41f99e1c, TypeCVI_VX_DV>, Enc_d483b9, Requires<[HasV60T,UseHVX]> {
let Inst{7-6} = 0b11;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001010;
@@ -42458,7 +42819,7 @@ def V6_vsathub : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsat($Vu32.h,$Vv32.h)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42470,7 +42831,7 @@ def V6_vsathub_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsat($Vu32.h,$Vv32.h)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42506,7 +42867,7 @@ def V6_vsatuwuh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -42518,7 +42879,7 @@ def V6_vsatuwuh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -42554,7 +42915,7 @@ def V6_vsatwh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsat($Vu32.w,$Vv32.w)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42566,7 +42927,7 @@ def V6_vsatwh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsat($Vu32.w,$Vv32.w)",
-CVI_VINLANESAT, TypeCVI_VINLANESAT>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_9b9642a1, TypeCVI_VINLANESAT>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111011;
@@ -42602,7 +42963,7 @@ def V6_vsb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.h = vsxt($Vu32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42614,7 +42975,7 @@ def V6_vsb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.h = vsxt($Vu32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42650,7 +43011,7 @@ def V6_vsh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.w = vsxt($Vu32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42662,7 +43023,7 @@ def V6_vsh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.w = vsxt($Vu32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42698,7 +43059,7 @@ def V6_vshufeh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vshuffe($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42710,7 +43071,7 @@ def V6_vshufeh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vshuffe($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42746,7 +43107,7 @@ def V6_vshuff : HInst<
(outs VectorRegs:$Vy32, VectorRegs:$Vx32),
(ins VectorRegs:$Vy32in, VectorRegs:$Vx32in, IntRegs:$Rt32),
"vshuff($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -42761,7 +43122,7 @@ def V6_vshuff_128B : HInst<
(outs VectorRegs128B:$Vy32, VectorRegs128B:$Vx32),
(ins VectorRegs128B:$Vy32in, VectorRegs128B:$Vx32in, IntRegs:$Rt32),
"vshuff($Vy32,$Vx32,$Rt32)",
-CVI_VP_VS_LONG_EARLY, TypeCVI_VP_VS>, Enc_11422009, Requires<[HasV60T,UseHVX]> {
+tc_5c120602, TypeCVI_VP_VS>, Enc_989021, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001111;
@@ -42777,7 +43138,7 @@ def V6_vshuffb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.b = vshuff($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42789,7 +43150,7 @@ def V6_vshuffb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.b = vshuff($Vu32.b)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -42825,7 +43186,7 @@ def V6_vshuffeb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vshuffe($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42837,7 +43198,7 @@ def V6_vshuffeb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vshuffe($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42873,7 +43234,7 @@ def V6_vshuffh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32),
"$Vd32.h = vshuff($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -42885,7 +43246,7 @@ def V6_vshuffh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32),
"$Vd32.h = vshuff($Vu32.h)",
-CVI_VP, TypeCVI_VP>, Enc_900013, Requires<[HasV60T,UseHVX]> {
+tc_e6299d16, TypeCVI_VP>, Enc_e7581c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -42921,7 +43282,7 @@ def V6_vshuffob : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vshuffo($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42933,7 +43294,7 @@ def V6_vshuffob_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vshuffo($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -42969,7 +43330,7 @@ def V6_vshuffvdd : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vshuff($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -42981,7 +43342,7 @@ def V6_vshuffvdd_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8),
"$Vdd32 = vshuff($Vu32,$Vv32,$Rt8)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV60T,UseHVX]> {
+tc_4e2a5159, TypeCVI_VP_VS>, Enc_24a7dc, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{31-24} = 0b00011011;
@@ -42994,7 +43355,7 @@ def V6_vshufoeb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.b = vshuffoe($Vu32.b,$Vv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43006,7 +43367,7 @@ def V6_vshufoeb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.b = vshuffoe($Vu32.b,$Vv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43042,7 +43403,7 @@ def V6_vshufoeh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vshuffoe($Vu32.h,$Vv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43054,7 +43415,7 @@ def V6_vshufoeh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vshuffoe($Vu32.h,$Vv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43090,7 +43451,7 @@ def V6_vshufoh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vshuffo($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43102,7 +43463,7 @@ def V6_vshufoh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vshuffo($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111010;
@@ -43138,7 +43499,7 @@ def V6_vsubb : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43150,7 +43511,7 @@ def V6_vsubb_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43186,7 +43547,7 @@ def V6_vsubb_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43198,7 +43559,7 @@ def V6_vsubb_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43234,7 +43595,7 @@ def V6_vsubbnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43248,7 +43609,7 @@ def V6_vsubbnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43288,7 +43649,7 @@ def V6_vsubbq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43302,7 +43663,7 @@ def V6_vsubbq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.b -= $Vu32.b",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43342,7 +43703,7 @@ def V6_vsubbsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -43354,7 +43715,7 @@ def V6_vsubbsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111001;
@@ -43390,7 +43751,7 @@ def V6_vsubbsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43402,7 +43763,7 @@ def V6_vsubbsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43438,7 +43799,7 @@ def V6_vsubcarry : HInst<
(outs VectorRegs:$Vd32, VecPredRegs:$Qx4),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in),
"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -43453,7 +43814,7 @@ def V6_vsubcarry_128B : HInst<
(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in),
"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry",
-CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> {
+tc_5a9fc4ec, TypeCVI_VA>, Enc_b43b67, Requires<[HasV62T,UseHVX]> {
let Inst{7-7} = 0b1;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011100101;
@@ -43469,7 +43830,7 @@ def V6_vsubh : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43481,7 +43842,7 @@ def V6_vsubh_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -43517,7 +43878,7 @@ def V6_vsubh_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43529,7 +43890,7 @@ def V6_vsubh_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43565,7 +43926,7 @@ def V6_vsubhnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43579,7 +43940,7 @@ def V6_vsubhnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -43619,7 +43980,7 @@ def V6_vsubhq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43633,7 +43994,7 @@ def V6_vsubhq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.h -= $Vu32.h",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000001;
@@ -43673,7 +44034,7 @@ def V6_vsubhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43685,7 +44046,7 @@ def V6_vsubhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.h = vsub($Vu32.h,$Vv32.h):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43721,7 +44082,7 @@ def V6_vsubhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43733,7 +44094,7 @@ def V6_vsubhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.h = vsub($Vuu32.h,$Vvv32.h):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43769,7 +44130,7 @@ def V6_vsubhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vsub($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43781,7 +44142,7 @@ def V6_vsubhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vsub($Vu32.h,$Vv32.h)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43817,7 +44178,7 @@ def V6_vsububh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.h = vsub($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43829,7 +44190,7 @@ def V6_vsububh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.h = vsub($Vu32.ub,$Vv32.ub)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -43865,7 +44226,7 @@ def V6_vsububsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43877,7 +44238,7 @@ def V6_vsububsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.ub):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43913,7 +44274,7 @@ def V6_vsububsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.ub = vsub($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43925,7 +44286,7 @@ def V6_vsububsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.ub = vsub($Vuu32.ub,$Vvv32.ub):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -43961,7 +44322,7 @@ def V6_vsubububb_sat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43973,7 +44334,7 @@ def V6_vsubububb_sat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -43986,7 +44347,7 @@ def V6_vsubuhsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uh = vsub($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -43998,7 +44359,7 @@ def V6_vsubuhsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uh = vsub($Vu32.uh,$Vv32.uh):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44034,7 +44395,7 @@ def V6_vsubuhsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uh = vsub($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44046,7 +44407,7 @@ def V6_vsubuhsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uh = vsub($Vuu32.uh,$Vvv32.uh):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44082,7 +44443,7 @@ def V6_vsubuhw : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32.w = vsub($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44094,7 +44455,7 @@ def V6_vsubuhw_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32.w = vsub($Vu32.uh,$Vv32.uh)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV60T,UseHVX]> {
+tc_eda67dcd, TypeCVI_VX_DV>, Enc_71bb9b, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b110;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44130,7 +44491,7 @@ def V6_vsubuwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -44142,7 +44503,7 @@ def V6_vsubuwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011111110;
@@ -44178,7 +44539,7 @@ def V6_vsubuwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -44190,7 +44551,7 @@ def V6_vsubuwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV62T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011110101;
@@ -44226,7 +44587,7 @@ def V6_vsubw : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -44238,7 +44599,7 @@ def V6_vsubw_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100010;
@@ -44274,7 +44635,7 @@ def V6_vsubw_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44286,7 +44647,7 @@ def V6_vsubw_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b101;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100100;
@@ -44322,7 +44683,7 @@ def V6_vsubwnq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if (!$Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44336,7 +44697,7 @@ def V6_vsubwnq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if (!$Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44376,7 +44737,7 @@ def V6_vsubwq : HInst<
(outs VectorRegs:$Vx32),
(ins VecPredRegs:$Qv4, VectorRegs:$Vx32in, VectorRegs:$Vu32),
"if ($Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44390,7 +44751,7 @@ def V6_vsubwq_128B : HInst<
(outs VectorRegs128B:$Vx32),
(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32),
"if ($Qv4) $Vx32.w -= $Vu32.w",
-CVI_VA, TypeCVI_VA>, Enc_12535811, Requires<[HasV60T,UseHVX]> {
+tc_a3127e12, TypeCVI_VA>, Enc_a90628, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{21-16} = 0b000010;
@@ -44430,7 +44791,7 @@ def V6_vsubwsat : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44442,7 +44803,7 @@ def V6_vsubwsat_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32.w = vsub($Vu32.w,$Vv32.w):sat",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100011;
@@ -44478,7 +44839,7 @@ def V6_vsubwsat_dv : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44490,7 +44851,7 @@ def V6_vsubwsat_dv_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32),
"$Vdd32.w = vsub($Vuu32.w,$Vvv32.w):sat",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV60T,UseHVX]> {
+tc_97c165b9, TypeCVI_VA_DV>, Enc_f8ecf9, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100101;
@@ -44526,7 +44887,7 @@ def V6_vswap : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecPredRegs:$Qt4, VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vdd32 = vswap($Qt4,$Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_11424254, Requires<[HasV60T,UseHVX]> {
+tc_316c637c, TypeCVI_VA_DV>, Enc_3dac0b, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110101;
@@ -44538,7 +44899,7 @@ def V6_vswap_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecPredRegs128B:$Qt4, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vdd32 = vswap($Qt4,$Vu32,$Vv32)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_11424254, Requires<[HasV60T,UseHVX]> {
+tc_316c637c, TypeCVI_VA_DV>, Enc_3dac0b, Requires<[HasV60T,UseHVX]> {
let Inst{7-7} = 0b0;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011110101;
@@ -44551,7 +44912,7 @@ def V6_vtmpyb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44563,7 +44924,7 @@ def V6_vtmpyb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44576,7 +44937,7 @@ def V6_vtmpyb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44590,7 +44951,7 @@ def V6_vtmpyb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.b,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44655,7 +45016,7 @@ def V6_vtmpybus : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44667,7 +45028,7 @@ def V6_vtmpybus_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.h = vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001000;
@@ -44680,7 +45041,7 @@ def V6_vtmpybus_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44694,7 +45055,7 @@ def V6_vtmpybus_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.h += vtmpy($Vuu32.ub,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44759,7 +45120,7 @@ def V6_vtmpyhb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -44771,7 +45132,7 @@ def V6_vtmpyhb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vdd32.w = vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV60T,UseHVX]> {
+tc_7c3f55c4, TypeCVI_VX_DV>, Enc_aad80c, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011001101;
@@ -44784,7 +45145,7 @@ def V6_vtmpyhb_acc : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44798,7 +45159,7 @@ def V6_vtmpyhb_acc_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32),
"$Vxx32.w += vtmpy($Vuu32.h,$Rt32.b)",
-CVI_VX_DV, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV60T,UseHVX]> {
+tc_d98f4d63, TypeCVI_VX_DV>, Enc_d6990d, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b1;
let Inst{31-21} = 0b00011001000;
@@ -44892,7 +45253,7 @@ def V6_vunpackb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.h = vunpack($Vu32.b)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44904,7 +45265,7 @@ def V6_vunpackb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.h = vunpack($Vu32.b)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44940,7 +45301,7 @@ def V6_vunpackh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.w = vunpack($Vu32.h)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44952,7 +45313,7 @@ def V6_vunpackh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.w = vunpack($Vu32.h)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b011;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -44988,7 +45349,7 @@ def V6_vunpackob : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32),
"$Vxx32.h |= vunpacko($Vu32.b)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45002,7 +45363,7 @@ def V6_vunpackob_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32),
"$Vxx32.h |= vunpacko($Vu32.b)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45042,7 +45403,7 @@ def V6_vunpackoh : HInst<
(outs VecDblRegs:$Vxx32),
(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32),
"$Vxx32.w |= vunpacko($Vu32.h)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45056,7 +45417,7 @@ def V6_vunpackoh_128B : HInst<
(outs VecDblRegs128B:$Vxx32),
(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32),
"$Vxx32.w |= vunpacko($Vu32.h)",
-CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_12669374, Requires<[HasV60T,UseHVX]> {
+tc_72ad7b54, TypeCVI_VP_VS>, Enc_500cb0, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b1;
let Inst{31-16} = 0b0001111000000000;
@@ -45098,7 +45459,7 @@ def V6_vunpackub : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uh = vunpack($Vu32.ub)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45110,7 +45471,7 @@ def V6_vunpackub_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uh = vunpack($Vu32.ub)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45146,7 +45507,7 @@ def V6_vunpackuh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uw = vunpack($Vu32.uh)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45158,7 +45519,7 @@ def V6_vunpackuh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uw = vunpack($Vu32.uh)",
-CVI_VP_VS, TypeCVI_VP_VS>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_d7bea0ec, TypeCVI_VP_VS>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000001;
@@ -45194,7 +45555,7 @@ def V6_vwhist128 : HInst<
(outs),
(ins),
"vwhist128",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45203,7 +45564,7 @@ def V6_vwhist128_128B : HInst<
(outs),
(ins),
"vwhist128",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45213,7 +45574,7 @@ def V6_vwhist128m : HInst<
(outs),
(ins u1_0Imm:$Ii),
"vwhist128(#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> {
+tc_b77635b4, TypeCVI_HIST>, Enc_efaed8, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{31-16} = 0b0001111000000000;
@@ -45223,7 +45584,7 @@ def V6_vwhist128m_128B : HInst<
(outs),
(ins u1_0Imm:$Ii),
"vwhist128(#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> {
+tc_b77635b4, TypeCVI_HIST>, Enc_efaed8, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{31-16} = 0b0001111000000000;
@@ -45234,7 +45595,7 @@ def V6_vwhist128q : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist128($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45244,7 +45605,7 @@ def V6_vwhist128q_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist128($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10010010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45255,7 +45616,7 @@ def V6_vwhist128qm : HInst<
(outs),
(ins VecPredRegs:$Qv4, u1_0Imm:$Ii),
"vwhist128($Qv4,#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> {
+tc_28978789, TypeCVI_HIST>, Enc_802dc0, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{21-16} = 0b000010;
@@ -45266,7 +45627,7 @@ def V6_vwhist128qm_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4, u1_0Imm:$Ii),
"vwhist128($Qv4,#$Ii)",
-CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> {
+tc_28978789, TypeCVI_HIST>, Enc_802dc0, Requires<[HasV62T,UseHVX]> {
let Inst{7-0} = 0b10000000;
let Inst{13-9} = 0b10011;
let Inst{21-16} = 0b000010;
@@ -45278,7 +45639,7 @@ def V6_vwhist256 : HInst<
(outs),
(ins),
"vwhist256",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45287,7 +45648,7 @@ def V6_vwhist256_128B : HInst<
(outs),
(ins),
"vwhist256",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45297,7 +45658,7 @@ def V6_vwhist256_sat : HInst<
(outs),
(ins),
"vwhist256:sat",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45306,7 +45667,7 @@ def V6_vwhist256_sat_128B : HInst<
(outs),
(ins),
"vwhist256:sat",
-CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> {
+tc_e5053c8f, TypeCVI_HIST>, Enc_e3b0c4, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{31-16} = 0b0001111000000000;
let DecoderNamespace = "EXT_mmvec";
@@ -45316,7 +45677,7 @@ def V6_vwhist256q : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist256($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45326,7 +45687,7 @@ def V6_vwhist256q_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist256($Qv4)",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001010000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45337,7 +45698,7 @@ def V6_vwhist256q_sat : HInst<
(outs),
(ins VecPredRegs:$Qv4),
"vwhist256($Qv4):sat",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45347,7 +45708,7 @@ def V6_vwhist256q_sat_128B : HInst<
(outs),
(ins VecPredRegs128B:$Qv4),
"vwhist256($Qv4):sat",
-CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> {
+tc_cedf314b, TypeCVI_HIST>, Enc_217147, Requires<[HasV62T,UseHVX]> {
let Inst{13-0} = 0b10001110000000;
let Inst{21-16} = 0b000010;
let Inst{31-24} = 0b00011110;
@@ -45358,7 +45719,7 @@ def V6_vxor : HInst<
(outs VectorRegs:$Vd32),
(ins VectorRegs:$Vu32, VectorRegs:$Vv32),
"$Vd32 = vxor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -45370,7 +45731,7 @@ def V6_vxor_128B : HInst<
(outs VectorRegs128B:$Vd32),
(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32),
"$Vd32 = vxor($Vu32,$Vv32)",
-CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV60T,UseHVX]> {
+tc_bbaf280e, TypeCVI_VA>, Enc_45364e, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b111;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b00011100001;
@@ -45383,7 +45744,7 @@ def V6_vzb : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uh = vzxt($Vu32.ub)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45395,7 +45756,7 @@ def V6_vzb_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uh = vzxt($Vu32.ub)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b001;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45431,7 +45792,7 @@ def V6_vzh : HInst<
(outs VecDblRegs:$Vdd32),
(ins VectorRegs:$Vu32),
"$Vdd32.uw = vzxt($Vu32.uh)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45443,7 +45804,7 @@ def V6_vzh_128B : HInst<
(outs VecDblRegs128B:$Vdd32),
(ins VectorRegs128B:$Vu32),
"$Vdd32.uw = vzxt($Vu32.uh)",
-CVI_VA_DV, TypeCVI_VA_DV>, Enc_14631806, Requires<[HasV60T,UseHVX]> {
+tc_644584f8, TypeCVI_VA_DV>, Enc_dd766a, Requires<[HasV60T,UseHVX]> {
let Inst{7-5} = 0b010;
let Inst{13-13} = 0b0;
let Inst{31-16} = 0b0001111000000010;
@@ -45479,7 +45840,7 @@ def Y2_barrier : HInst<
(outs),
(ins),
"barrier",
-ST_tc_3stall_SLOT0, TypeST>, Enc_0 {
+tc_ef2676fd, TypeST>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b1010100000000000;
let isSoloAX = 1;
@@ -45489,7 +45850,7 @@ def Y2_break : HInst<
(outs),
(ins),
"brkpt",
-CR_tc_3x_SLOT3, TypeCR>, Enc_0 {
+tc_bcf0e36e, TypeCR>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b0110110000100000;
let isSolo = 1;
@@ -45498,7 +45859,7 @@ def Y2_dccleana : HInst<
(outs),
(ins IntRegs:$Rs32),
"dccleana($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000000;
let isSoloAin1 = 1;
@@ -45507,7 +45868,7 @@ def Y2_dccleaninva : HInst<
(outs),
(ins IntRegs:$Rs32),
"dccleaninva($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000010;
let isSoloAin1 = 1;
@@ -45516,7 +45877,7 @@ def Y2_dcfetch : HInst<
(outs),
(ins IntRegs:$Rs32),
"dcfetch($Rs32)",
-PSEUDO, TypeMAPPING> {
+tc_34e882a4, TypeMAPPING> {
let hasSideEffects = 1;
let isPseudo = 1;
let isCodeGenOnly = 1;
@@ -45525,7 +45886,7 @@ def Y2_dcfetchbo : HInst<
(outs),
(ins IntRegs:$Rs32, u11_3Imm:$Ii),
"dcfetch($Rs32+#$Ii)",
-LD_tc_ld_SLOT0, TypeLD>, Enc_4983213 {
+tc_ef0ebaaa, TypeLD>, Enc_2d829e {
let Inst{13-11} = 0b000;
let Inst{31-21} = 0b10010100000;
let addrMode = BaseImmOffset;
@@ -45535,7 +45896,7 @@ def Y2_dcinva : HInst<
(outs),
(ins IntRegs:$Rs32),
"dcinva($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000001;
let isSoloAin1 = 1;
@@ -45544,17 +45905,17 @@ def Y2_dczeroa : HInst<
(outs),
(ins IntRegs:$Rs32),
"dczeroa($Rs32)",
-ST_tc_ld_SLOT0, TypeST>, Enc_11704059 {
+tc_30665cb0, TypeST>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b10100000110;
-let mayStore = 1;
let isSoloAin1 = 1;
+let mayStore = 1;
}
def Y2_icinva : HInst<
(outs),
(ins IntRegs:$Rs32),
"icinva($Rs32)",
-J_tc_2early_SLOT2, TypeJ>, Enc_11704059 {
+tc_049dfb74, TypeJ>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01010110110;
let isSolo = 1;
@@ -45563,7 +45924,7 @@ def Y2_isync : HInst<
(outs),
(ins),
"isync",
-J_tc_2early_SLOT2, TypeJ>, Enc_0 {
+tc_d267fa19, TypeJ>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000010;
let Inst{31-16} = 0b0101011111000000;
let isSolo = 1;
@@ -45572,7 +45933,7 @@ def Y2_syncht : HInst<
(outs),
(ins),
"syncht",
-ST_tc_ld_SLOT0, TypeST>, Enc_0 {
+tc_ef2676fd, TypeST>, Enc_e3b0c4 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-16} = 0b1010100001000000;
let isSolo = 1;
@@ -45581,7 +45942,7 @@ def Y4_l2fetch : HInst<
(outs),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"l2fetch($Rs32,$Rt32)",
-ST_tc_3stall_SLOT0, TypeST>, Enc_14620934 {
+tc_f4608adc, TypeST>, Enc_ca3887 {
let Inst{7-0} = 0b00000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100110000;
@@ -45593,7 +45954,7 @@ def Y4_trace : HInst<
(outs),
(ins IntRegs:$Rs32),
"trace($Rs32)",
-CR_tc_2early_SLOT3, TypeCR>, Enc_11704059 {
+tc_4997da4a, TypeCR>, Enc_ecbcc8 {
let Inst{13-0} = 0b00000000000000;
let Inst{31-21} = 0b01100010010;
let isSoloAX = 1;
@@ -45602,7 +45963,7 @@ def Y5_l2fetch : HInst<
(outs),
(ins IntRegs:$Rs32, DoubleRegs:$Rtt32),
"l2fetch($Rs32,$Rtt32)",
-ST_tc_3stall_SLOT0, TypeST>, Enc_8943121, Requires<[HasV5T]> {
+tc_f4608adc, TypeST>, Enc_e6abcf, Requires<[HasV5T]> {
let Inst{7-0} = 0b00000000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b10100110100;
@@ -45614,31 +45975,33 @@ def dep_A2_addsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rd32 = add($Rs32,$Rt32):sat:deprecated",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_14071773 {
+tc_47ab9233, TypeALU64>, Enc_5ab2be {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def dep_A2_subsat : HInst<
(outs IntRegs:$Rd32),
(ins IntRegs:$Rt32, IntRegs:$Rs32),
"$Rd32 = sub($Rt32,$Rs32):sat:deprecated",
-ALU64_tc_2_SLOT23, TypeALU64>, Enc_8605375 {
+tc_47ab9233, TypeALU64>, Enc_bd6011 {
let Inst{7-5} = 0b100;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010101100;
let hasNewValue = 1;
let opNewValue = 0;
+let prefersSlot3 = 1;
let Defs = [USR_OVF];
}
def dep_S2_packhl : HInst<
(outs DoubleRegs:$Rdd32),
(ins IntRegs:$Rs32, IntRegs:$Rt32),
"$Rdd32 = packhl($Rs32,$Rt32):deprecated",
-ALU64_tc_1_SLOT23, TypeALU64>, Enc_1997594 {
+tc_9c18c9a5, TypeALU64>, Enc_be32a5 {
let Inst{7-5} = 0b000;
let Inst{13-13} = 0b0;
let Inst{31-21} = 0b11010100000;
diff --git a/lib/Target/Hexagon/HexagonDepTimingClasses.h b/lib/Target/Hexagon/HexagonDepTimingClasses.h
new file mode 100644
index 000000000000..52963034543d
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonDepTimingClasses.h
@@ -0,0 +1,132 @@
+//===--- HexagonDepTimingClasses.h ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+static bool is_TC3x(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_1000eb10:
+ case Hexagon::Sched::tc_2aaab1e0:
+ case Hexagon::Sched::tc_4997da4a:
+ case Hexagon::Sched::tc_5d806107:
+ case Hexagon::Sched::tc_6264c5e0:
+ case Hexagon::Sched::tc_69bb508b:
+ case Hexagon::Sched::tc_8c8041e6:
+ case Hexagon::Sched::tc_8cb685d9:
+ case Hexagon::Sched::tc_a12a5971:
+ case Hexagon::Sched::tc_ae0722f7:
+ case Hexagon::Sched::tc_ae2c2dc2:
+ case Hexagon::Sched::tc_bc5561d8:
+ case Hexagon::Sched::tc_d6a805a8:
+ case Hexagon::Sched::tc_f055fbb6:
+ case Hexagon::Sched::tc_feb4974b:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC2early(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_35fb9d13:
+ case Hexagon::Sched::tc_cbe45117:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC4x(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_09c86199:
+ case Hexagon::Sched::tc_2d1e6f5c:
+ case Hexagon::Sched::tc_2e55aa16:
+ case Hexagon::Sched::tc_3bea1824:
+ case Hexagon::Sched::tc_e836c161:
+ case Hexagon::Sched::tc_f1aa2cdb:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC2(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_090485bb:
+ case Hexagon::Sched::tc_1fe8323c:
+ case Hexagon::Sched::tc_37326008:
+ case Hexagon::Sched::tc_3c10f809:
+ case Hexagon::Sched::tc_47ab9233:
+ case Hexagon::Sched::tc_485bb57c:
+ case Hexagon::Sched::tc_511f28f6:
+ case Hexagon::Sched::tc_583510c7:
+ case Hexagon::Sched::tc_63cd9d2d:
+ case Hexagon::Sched::tc_76c4c5ef:
+ case Hexagon::Sched::tc_7ca2ea10:
+ case Hexagon::Sched::tc_87601822:
+ case Hexagon::Sched::tc_88fa2da6:
+ case Hexagon::Sched::tc_94e6ffd9:
+ case Hexagon::Sched::tc_ab1b5e74:
+ case Hexagon::Sched::tc_b0f50e3c:
+ case Hexagon::Sched::tc_bd16579e:
+ case Hexagon::Sched::tc_c0cd91a8:
+ case Hexagon::Sched::tc_ca280e8b:
+ case Hexagon::Sched::tc_cd321066:
+ case Hexagon::Sched::tc_d95f4e98:
+ case Hexagon::Sched::tc_e17ce9ad:
+ case Hexagon::Sched::tc_f1240c08:
+ case Hexagon::Sched::tc_faab1248:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_TC1(unsigned SchedClass) {
+ switch (SchedClass) {
+ case Hexagon::Sched::tc_07ac815d:
+ case Hexagon::Sched::tc_1b6011fb:
+ case Hexagon::Sched::tc_1b834fe7:
+ case Hexagon::Sched::tc_1e062b18:
+ case Hexagon::Sched::tc_1f9668cc:
+ case Hexagon::Sched::tc_43068634:
+ case Hexagon::Sched::tc_47f0b7ad:
+ case Hexagon::Sched::tc_537e2013:
+ case Hexagon::Sched::tc_548f402d:
+ case Hexagon::Sched::tc_5fa2857c:
+ case Hexagon::Sched::tc_5fe9fcd0:
+ case Hexagon::Sched::tc_78b3c689:
+ case Hexagon::Sched::tc_7c2dcd4d:
+ case Hexagon::Sched::tc_81a23d44:
+ case Hexagon::Sched::tc_821c4233:
+ case Hexagon::Sched::tc_92d1833c:
+ case Hexagon::Sched::tc_9a13af9d:
+ case Hexagon::Sched::tc_9c18c9a5:
+ case Hexagon::Sched::tc_9df8b0dc:
+ case Hexagon::Sched::tc_9f518242:
+ case Hexagon::Sched::tc_a1fb80e1:
+ case Hexagon::Sched::tc_a333d2a9:
+ case Hexagon::Sched::tc_a87879e8:
+ case Hexagon::Sched::tc_aad55963:
+ case Hexagon::Sched::tc_b08b653e:
+ case Hexagon::Sched::tc_b324366f:
+ case Hexagon::Sched::tc_b5bfaa60:
+ case Hexagon::Sched::tc_b86c7e8b:
+ case Hexagon::Sched::tc_c58f771a:
+ case Hexagon::Sched::tc_d108a090:
+ case Hexagon::Sched::tc_d1b5a4b6:
+ case Hexagon::Sched::tc_d2609065:
+ case Hexagon::Sched::tc_d63b71d1:
+ case Hexagon::Sched::tc_e2c31426:
+ case Hexagon::Sched::tc_e8c7a357:
+ case Hexagon::Sched::tc_eb07ef6f:
+ case Hexagon::Sched::tc_f16d5b17:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/lib/Target/Hexagon/HexagonIICHVX.td b/lib/Target/Hexagon/HexagonIICHVX.td
index 4081a225832b..1493d52f08e8 100644
--- a/lib/Target/Hexagon/HexagonIICHVX.td
+++ b/lib/Target/Hexagon/HexagonIICHVX.td
@@ -7,96 +7,12 @@
//
//===----------------------------------------------------------------------===//
-//
-// Though all these itinerary classes exist for V60 onwards, they are being
-// listed here as 'HVXV62Itin' because itinerary class description prior to V62
-// doesn't include operand cycle info. In future, I plan to merge them
-// together and call it 'HVXItin'.
-//
-class HVXV62Itin {
- list<InstrItinData> HVXV62Itin_list = [
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23,
- [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_SLOT23,
- [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CVI_VA, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE,CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VA_DV, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF, CVI_MPY01]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_LATE, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_SLOT2, [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VX_DV_SLOT2_LONG_EARLY,
- [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_EARLY, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_VS_LONG_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VP_DV, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VS, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VINLANESAT, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_LD, [InstrStage<1, [SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_TMP_LD, [InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD]>],[1, 1, 1, 1, 10]>,
- InstrItinData<CVI_VM_CUR_LD, [InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_VP_LDU, [InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_ST, [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>],
- [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_NEW_ST, [InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [CVI_ST]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_VM_STU, [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>,
- InstrItinData<CVI_HIST, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_ALL]>], [1, 1, 1, 1]>];
+def CVI_VA : InstrItinClass;
+
+class HVXItin {
+ list<InstrItinData> HVXItin_list = [
+ InstrItinData<CVI_VA,
+ [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
+ InstrStage<1, [CVI_XLANE,CVI_SHIFT, CVI_MPY0, CVI_MPY1]>],
+ [9, 7, 7, 7], [HVX_FWD, HVX_FWD, HVX_FWD]>];
}
diff --git a/lib/Target/Hexagon/HexagonIICScalar.td b/lib/Target/Hexagon/HexagonIICScalar.td
index e69cfbdad688..5fe713346e38 100644
--- a/lib/Target/Hexagon/HexagonIICScalar.td
+++ b/lib/Target/Hexagon/HexagonIICScalar.td
@@ -11,154 +11,22 @@
// classes as per V62. Curretnly, they are just extracted from
// HexagonScheduleV62.td but will soon be auto-generated by HexagonGen.py.
+class PseudoItin {
+ list<InstrItinData> PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>], [2]>
+ ];
+}
+
class ScalarItin {
list<InstrItinData> ScalarItin_list = [
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>], [3, 1, 1]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>], [2, 1, 1]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<1, [SLOT2]>], [3, 1, 1]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123, [InstrStage<1,
- [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>], [4, 1]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [3, 1]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<1, [SLOT0]>], [3, 1, 1]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [3, 1, 1]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- // The S_2op_tc_3x_SLOT23 slots are 4 cycles on v60.
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [4, 1, 1]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>],
- [3, 1, 1, 1]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<1, [SLOT_ENDLOOP]>],
- [2]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
-
- // Duplex and Compound
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
- // Misc
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDOM , [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>];
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [3, 1], [Hex_FWD, Hex_FWD]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1], [Hex_FWD, Hex_FWD, Hex_FWD]>
+ ];
}
diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td
index 709d64585c0b..636a439ba6a9 100644
--- a/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -188,30 +188,10 @@ class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-class PseudoLDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-
class CONSTLDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : PseudoLDInst<outs, ins, asmstr, pattern, cstr>;
-
-// LD Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayLoad = 1 in
-class LD0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin=LD_tc_ld_SLOT0>
+ string cstr = "", InstrItinClass itin = LD_tc_ld_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>, OpcodeHexagon;
-let mayLoad = 1 in
-class LD1Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin=LD_tc_ld_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeLD>;
-
// ST Instruction Class in V2/V3 can take SLOT0 only.
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
@@ -220,124 +200,9 @@ class STInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>, OpcodeHexagon;
-let mayStore = 1 in
-class STInst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
-
-class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayStore = 1 in
-class ST0Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_ld_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>, OpcodeHexagon;
-
-// Same as ST0Inst but doesn't derive from OpcodeHexagon.
-let mayStore = 1 in
-class ST1Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeST>;
-
-// ST Instruction Class in V2/V3 can take SLOT0 only.
-// ST Instruction Class in V4 can take SLOT0 & SLOT1.
-// Definition of the instruction class CHANGED from V2/V3 to V4.
-class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT01>
- : STInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// ALU64 Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
-class ALU64Inst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeALU64>,
- OpcodeHexagon;
-
-// ALU64 Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
-class ALU64Inst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeALU64>;
-
-
-class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_2_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-
-// M Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
-class MInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_3x_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeM>,
- OpcodeHexagon;
-
-// Same as above but doesn't derive from OpcodeHexagon
-class MInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_3x_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeM>;
-
-// M Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
-class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = M_tc_2_SLOT23>
- : MInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// S Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
-class SInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>,
- OpcodeHexagon;
-
-class SInst_NoOpcode<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>;
-
-class SInst2<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_2op_tc_1_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeS_2op>;
-
-// S Instruction Class in V2/V3.
-// XTYPE Instruction Class in V4.
-// Definition of the instruction class NOT CHANGED.
-// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
-class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = S_3op_tc_1_SLOT23>
- : SInst<outs, ins, asmstr, pattern, cstr, itin> {
- let Type = TypeS_3op;
-}
-
-// J Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class JInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_SLOT23>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
-
-class JInst_CJUMP_UCJUMP<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeJ>, OpcodeHexagon;
-
-// CR Instruction Class in V2/V3/V4.
-// Definition of the instruction class NOT CHANGED.
-class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CR_tc_2early_SLOT3>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCR>, OpcodeHexagon;
-
let isCodeGenOnly = 1, isPseudo = 1 in
class Endloop<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = J_tc_2early_SLOT0123>
+ string cstr = "", InstrItinClass itin = tc_ENDLOOP>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeENDLOOP>,
OpcodeHexagon;
@@ -357,27 +222,6 @@ class PseudoM<dag outs, dag ins, string asmstr, list<dag> pattern = [],
// Instruction Classes Definitions -
//===----------------------------------------------------------------------===//
-//
-// ALU64 patterns.
-//
-class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ALU64_tc_1_SLOT23>
- : ALU64Inst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Post increment ST Instruction.
-class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : STInst<outs, ins, asmstr, pattern, cstr>;
-
-// Post increment LD Instruction.
-class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : LDInst<outs, ins, asmstr, pattern, cstr>;
-
//===----------------------------------------------------------------------===//
// V4 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
@@ -385,7 +229,7 @@ class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern = [],
include "HexagonInstrFormatsV4.td"
//===----------------------------------------------------------------------===//
-// V4 Instruction Format Definitions +
+// V55 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
@@ -395,5 +239,5 @@ include "HexagonInstrFormatsV4.td"
include "HexagonInstrFormatsV60.td"
//===----------------------------------------------------------------------===//
-// V60 Instruction Format Definitions +
+// V62 Instruction Format Definitions +
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
index 1fdf930c62fd..c5fa25995212 100644
--- a/lib/Target/Hexagon/HexagonInstrFormatsV4.td
+++ b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
@@ -1,4 +1,4 @@
-//==- HexagonInstrFormats.td - Hexagon Instruction Formats --*- tablegen -*-==//
+//==- HexagonInstrFormatsV4.td - Hexagon Instruction Formats --*- tablegen -==//
//
// The LLVM Compiler Infrastructure
//
@@ -85,64 +85,3 @@ class InstDuplex<bits<4> iClass, list<dag> pattern = [],
bits<2> opExtentAlign = 0;
let TSFlags{28-27} = opExtentAlign; // Alignment exponent before extending.
}
-
-//----------------------------------------------------------------------------//
-// Instruction Classes Definitions
-//----------------------------------------------------------------------------//
-
-//
-// NV type instructions.
-//
-class NVInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeNCJ>, OpcodeHexagon;
-
-class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = NCJ_tc_3or4stall_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Definition of Post increment new value store.
-class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// Post increment ST Instruction.
-let mayStore = 1 in
-class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = ST_tc_st_SLOT0>
- : NVInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-// New-value conditional branch.
-class NCJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : NVInst<outs, ins, asmstr, pattern, cstr>;
-
-let mayLoad = 1, mayStore = 1 in
-class MEMInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeV4LDST>,
- OpcodeHexagon;
-
-class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = V4LDST_tc_st_SLOT0>
- : MEMInst<outs, ins, asmstr, pattern, cstr, itin>;
-
-class EXTENDERInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
- : InstHexagon<outs, ins, asmstr, pattern, "", EXTENDER_tc_1_SLOT0123,
- TypeEXTENDER>, OpcodeHexagon;
-
-class SUBInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypeDUPLEX>,
- OpcodeHexagon;
-
-class CJInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND_CJ_ARCHDEPSLOT, TypeCJ>,
- OpcodeHexagon;
-
-class CJInst_JMPSET<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "">
- : InstHexagon<outs, ins, asmstr, pattern, cstr, COMPOUND, TypeCJ>,
- OpcodeHexagon;
-
diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV60.td b/lib/Target/Hexagon/HexagonInstrFormatsV60.td
index b913727972e5..14bda0e0107d 100644
--- a/lib/Target/Hexagon/HexagonInstrFormatsV60.td
+++ b/lib/Target/Hexagon/HexagonInstrFormatsV60.td
@@ -20,183 +20,3 @@ class CVI_VA_Resource<dag outs, dag ins, string asmstr,
InstrItinClass itin = CVI_VA>
: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA>,
OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VA_DV_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VA_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource_late<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_LATE>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Slot2_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV_SLOT2>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_early<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_EARLY>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_LONG>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VP_VS_Resource_long_early<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VP_VS_LONG_EARLY>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VP_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VS_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VS>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VINLANESAT_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VINLANESAT>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VINLANESAT>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VS_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VS>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VS>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_LD_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_LD_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_TMP_LD_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_TMP_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_TMP_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_TMP_LD_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_TMP_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_TMP_LD>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_VP_LDU_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_VP_LDU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_VP_LDU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_VP_LDU_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_VP_LDU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_VP_LDU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_ST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_ST_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_NEW_ST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_NEW_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_NEW_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_NEW_ST_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_NEW_ST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_NEW_ST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_STU_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_STU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_STU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VM_STU_Resource_long<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VM_STU>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VM_STU>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_HIST_Resource<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_HIST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_HIST>,
- OpcodeHexagon, Requires<[HasV60T, UseHVX]>;
-
-class CVI_VA_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VA>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VA>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_VX_DV_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_VX_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_VX_DV>,
- Requires<[HasV60T, UseHVX]>;
-
-class CVI_HIST_Resource1<dag outs, dag ins, string asmstr,
- list<dag> pattern = [], string cstr = "",
- InstrItinClass itin = CVI_HIST>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, TypeCVI_HIST>,
- Requires<[HasV60T, UseHVX]>;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 852bfb1b4f54..03794511414e 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -59,6 +59,7 @@ using namespace llvm;
#define GET_INSTRMAP_INFO
#include "HexagonGenInstrInfo.inc"
#include "HexagonGenDFAPacketizer.inc"
+#include "HexagonDepTimingClasses.h"
cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
@@ -1466,7 +1467,15 @@ bool HexagonInstrInfo::DefinesPredicate(
}
bool HexagonInstrInfo::isPredicable(const MachineInstr &MI) const {
- return MI.getDesc().isPredicable();
+ if (!MI.getDesc().isPredicable())
+ return false;
+
+ if (MI.isCall() || isTailCall(MI)) {
+ const MachineFunction &MF = *MI.getParent()->getParent();
+ if (!MF.getSubtarget<HexagonSubtarget>().usePredicatedCalls())
+ return false;
+ }
+ return true;
}
bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
@@ -1643,6 +1652,7 @@ unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
return getInstrTimingClassLatency(ItinData, MI);
}
+
DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
const TargetSubtargetInfo &STI) const {
const InstrItineraryData *II = STI.getInstrItineraryData();
@@ -2047,9 +2057,7 @@ bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr &MI) const {
// Multiply
unsigned SchedClass = MI.getDesc().getSchedClass();
- if (SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23)
- return true;
- return false;
+ return is_TC4x(SchedClass) || is_TC3x(SchedClass);
}
bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
@@ -2117,7 +2125,7 @@ bool HexagonInstrInfo::isFloat(const MachineInstr &MI) const {
// No V60 HVX VMEM with A_INDIRECT.
bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr &I,
const MachineInstr &J) const {
- if (!isV60VectorInstruction(I))
+ if (!isHVXVec(I))
return false;
if (!I.mayLoad() && !I.mayStore())
return false;
@@ -2241,30 +2249,13 @@ bool HexagonInstrInfo::isLateResultInstr(const MachineInstr &MI) const {
}
unsigned SchedClass = MI.getDesc().getSchedClass();
-
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
- case Hexagon::Sched::ALU64_tc_1_SLOT23:
- case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
- case Hexagon::Sched::S_2op_tc_1_SLOT23:
- case Hexagon::Sched::S_3op_tc_1_SLOT23:
- case Hexagon::Sched::V2LDST_tc_ld_SLOT01:
- case Hexagon::Sched::V2LDST_tc_st_SLOT0:
- case Hexagon::Sched::V2LDST_tc_st_SLOT01:
- case Hexagon::Sched::V4LDST_tc_ld_SLOT01:
- case Hexagon::Sched::V4LDST_tc_st_SLOT0:
- case Hexagon::Sched::V4LDST_tc_st_SLOT01:
- return false;
- }
- return true;
+ return !is_TC1(SchedClass);
}
bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr &MI) const {
// Instructions with iclass A_CVI_VX and attribute A_CVI_LATE uses a multiply
// resource, but all operands can be received late like an ALU instruction.
- return MI.getDesc().getSchedClass() == Hexagon::Sched::CVI_VX_LATE;
+ return getType(MI) == HexagonII::TypeCVI_VX_LATE;
}
bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
@@ -2507,61 +2498,22 @@ bool HexagonInstrInfo::isTailCall(const MachineInstr &MI) const {
// Returns true when SU has a timing class TC1.
bool HexagonInstrInfo::isTC1(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
- case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
- case Hexagon::Sched::ALU64_tc_1_SLOT23:
- case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
- //case Hexagon::Sched::M_tc_1_SLOT23:
- case Hexagon::Sched::S_2op_tc_1_SLOT23:
- case Hexagon::Sched::S_3op_tc_1_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC1(SchedClass);
}
bool HexagonInstrInfo::isTC2(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_3op_tc_2_SLOT0123:
- case Hexagon::Sched::ALU64_tc_2_SLOT23:
- case Hexagon::Sched::CR_tc_2_SLOT3:
- case Hexagon::Sched::M_tc_2_SLOT23:
- case Hexagon::Sched::S_2op_tc_2_SLOT23:
- case Hexagon::Sched::S_3op_tc_2_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC2(SchedClass);
}
bool HexagonInstrInfo::isTC2Early(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- switch (SchedClass) {
- case Hexagon::Sched::ALU32_2op_tc_2early_SLOT0123:
- case Hexagon::Sched::ALU32_3op_tc_2early_SLOT0123:
- case Hexagon::Sched::ALU64_tc_2early_SLOT23:
- case Hexagon::Sched::CR_tc_2early_SLOT23:
- case Hexagon::Sched::CR_tc_2early_SLOT3:
- case Hexagon::Sched::J_tc_2early_SLOT0123:
- case Hexagon::Sched::J_tc_2early_SLOT2:
- case Hexagon::Sched::J_tc_2early_SLOT23:
- case Hexagon::Sched::S_2op_tc_2early_SLOT23:
- case Hexagon::Sched::S_3op_tc_2early_SLOT23:
- return true;
-
- default:
- return false;
- }
+ return is_TC2early(SchedClass);
}
bool HexagonInstrInfo::isTC4x(const MachineInstr &MI) const {
unsigned SchedClass = MI.getDesc().getSchedClass();
- return SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23;
+ return is_TC4x(SchedClass);
}
// Schedule this ASAP.
@@ -2583,7 +2535,7 @@ bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr &MI1,
return false;
}
-bool HexagonInstrInfo::isV60VectorInstruction(const MachineInstr &MI) const {
+bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {
const uint64_t V = getType(MI);
return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
}
@@ -2782,7 +2734,7 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
}
bool HexagonInstrInfo::isVecAcc(const MachineInstr &MI) const {
- return isV60VectorInstruction(MI) && isAccumulator(MI);
+ return isHVXVec(MI) && isAccumulator(MI);
}
bool HexagonInstrInfo::isVecALU(const MachineInstr &MI) const {
@@ -2888,7 +2840,7 @@ bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
// Add latency to instruction.
bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
const MachineInstr &MI2) const {
- if (isV60VectorInstruction(MI1) && isV60VectorInstruction(MI2))
+ if (isHVXVec(MI1) && isHVXVec(MI2))
if (!isVecUsableNextPacket(MI1, MI2))
return true;
return false;
@@ -3013,7 +2965,7 @@ bool HexagonInstrInfo::mayBeNewStore(const MachineInstr &MI) const {
bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
const MachineInstr &ConsMI) const {
// There is no stall when ProdMI is not a V60 vector.
- if (!isV60VectorInstruction(ProdMI))
+ if (!isHVXVec(ProdMI))
return false;
// There is no stall when ProdMI and ConsMI are not dependent.
@@ -3031,7 +2983,7 @@ bool HexagonInstrInfo::producesStall(const MachineInstr &ProdMI,
bool HexagonInstrInfo::producesStall(const MachineInstr &MI,
MachineBasicBlock::const_instr_iterator BII) const {
// There is no stall when I is not a V60 vector.
- if (!isV60VectorInstruction(MI))
+ if (!isHVXVec(MI))
return false;
MachineBasicBlock::const_instr_iterator MII = BII;
@@ -3415,7 +3367,6 @@ int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
// p.old store
// [if (p0)memw(R0+#0)=R2]
//
-//
// The following set of instructions further explains the scenario where
// conditional new-value store becomes invalid when promoted to .new predicate
// form.
@@ -4025,18 +3976,53 @@ unsigned HexagonInstrInfo::getInstrTimingClassLatency(
if (!ItinData)
return getInstrLatency(ItinData, MI);
- // Get the latency embedded in the itinerary. If we're not using timing class
- // latencies or if we using BSB scheduling, then restrict the maximum latency
- // to 1 (that is, either 0 or 1).
if (MI.isTransient())
return 0;
- unsigned Latency = ItinData->getStageLatency(MI.getDesc().getSchedClass());
- if (!EnableTimingClassLatency ||
- MI.getParent()->getParent()->getSubtarget<HexagonSubtarget>().
- useBSBScheduling())
- if (Latency > 1)
- Latency = 1;
- return Latency;
+ return ItinData->getStageLatency(MI.getDesc().getSchedClass());
+}
+
+/// getOperandLatency - Compute and return the use operand latency of a given
+/// pair of def and use.
+/// In most cases, the static scheduling itinerary was enough to determine the
+/// operand latency. But it may not be possible for instructions with variable
+/// number of defs / uses.
+///
+/// This is a raw interface to the itinerary that may be directly overriden by
+/// a target. Use computeOperandLatency to get the best estimate of latency.
+int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI,
+ unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const {
+ auto &RI = getRegisterInfo();
+ // Get DefIdx and UseIdx for super registers.
+ MachineOperand DefMO = DefMI.getOperand(DefIdx);
+
+ if (RI.isPhysicalRegister(DefMO.getReg())) {
+ if (DefMO.isImplicit()) {
+ for (MCSuperRegIterator SR(DefMO.getReg(), &RI); SR.isValid(); ++SR) {
+ int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &RI);
+ if (Idx != -1) {
+ DefIdx = Idx;
+ break;
+ }
+ }
+ }
+
+ MachineOperand UseMO = UseMI.getOperand(UseIdx);
+ if (UseMO.isImplicit()) {
+ for (MCSuperRegIterator SR(UseMO.getReg(), &RI); SR.isValid(); ++SR) {
+ int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &RI);
+ if (Idx != -1) {
+ UseIdx = Idx;
+ break;
+ }
+ }
+ }
+ }
+
+ return TargetInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
+ UseMI, UseIdx);
}
// inverts the predication logic.
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 21b4f738f6e8..97b9bc954688 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -288,6 +288,19 @@ public:
/// If the instruction is an increment of a constant value, return the amount.
bool getIncrementValue(const MachineInstr &MI, int &Value) const override;
+ /// getOperandLatency - Compute and return the use operand latency of a given
+ /// pair of def and use.
+ /// In most cases, the static scheduling itinerary was enough to determine the
+ /// operand latency. But it may not be possible for instructions with variable
+ /// number of defs / uses.
+ ///
+ /// This is a raw interface to the itinerary that may be directly overriden by
+ /// a target. Use computeOperandLatency to get the best estimate of latency.
+ int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr &DefMI, unsigned DefIdx,
+ const MachineInstr &UseMI,
+ unsigned UseIdx) const override;
+
bool isTailCall(const MachineInstr &MI) const override;
/// HexagonInstrInfo specifics.
@@ -356,7 +369,7 @@ public:
bool isTC4x(const MachineInstr &MI) const;
bool isToBeScheduledASAP(const MachineInstr &MI1,
const MachineInstr &MI2) const;
- bool isV60VectorInstruction(const MachineInstr &MI) const;
+ bool isHVXVec(const MachineInstr &MI) const;
bool isValidAutoIncImm(const EVT VT, const int Offset) const;
bool isValidOffset(unsigned Opcode, int Offset, bool Extend = true) const;
bool isVecAcc(const MachineInstr &MI) const;
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
index 20dc9b0da1db..324108284a9a 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp
@@ -744,7 +744,7 @@ int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
// Give less preference to an instruction that will cause a stall with
// an instruction in the previous packet.
- if (QII.isV60VectorInstruction(Instr)) {
+ if (QII.isHVXVec(Instr)) {
// Check for stalls in the previous packet.
if (Q.getID() == TopQID) {
for (auto J : Top.ResourceModel->OldPacket)
diff --git a/lib/Target/Hexagon/HexagonPatterns.td b/lib/Target/Hexagon/HexagonPatterns.td
index b8c3bf0745ce..32503d111c24 100644
--- a/lib/Target/Hexagon/HexagonPatterns.td
+++ b/lib/Target/Hexagon/HexagonPatterns.td
@@ -1,13 +1,5 @@
// Pattern fragment that combines the value type and the register class
// into a single parameter.
-// The pat frags in the definitions below need to have a named register,
-// otherwise i32 will be assumed regardless of the register class. The
-// name of the register does not matter.
-def I1 : PatLeaf<(i1 PredRegs:$R)>;
-def I32 : PatLeaf<(i32 IntRegs:$R)>;
-def I64 : PatLeaf<(i64 DoubleRegs:$R)>;
-def F32 : PatLeaf<(f32 IntRegs:$R)>;
-def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
// Pattern fragments to extract the low and high subregisters from a
// 64-bit value.
diff --git a/lib/Target/Hexagon/HexagonPseudo.td b/lib/Target/Hexagon/HexagonPseudo.td
index 2e8def572c4b..8c2caea2d5c5 100644
--- a/lib/Target/Hexagon/HexagonPseudo.td
+++ b/lib/Target/Hexagon/HexagonPseudo.td
@@ -7,6 +7,15 @@
//
//===----------------------------------------------------------------------===//
+// The pat frags in the definitions below need to have a named register,
+// otherwise i32 will be assumed regardless of the register class. The
+// name of the register does not matter.
+def I1 : PatLeaf<(i1 PredRegs:$R)>;
+def I32 : PatLeaf<(i32 IntRegs:$R)>;
+def I64 : PatLeaf<(i64 DoubleRegs:$R)>;
+def F32 : PatLeaf<(f32 IntRegs:$R)>;
+def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
+
let PrintMethod = "printGlobalOperand" in {
def globaladdress : Operand<i32>;
def globaladdressExt : Operand<i32>;
@@ -23,17 +32,20 @@ def DUPLEX_Pseudo : InstHexagon<(outs),
let isExtendable = 1, opExtendable = 1, opExtentBits = 6,
isAsmParserOnly = 1 in
-def TFRI64_V2_ext : ALU64_rr<(outs DoubleRegs:$dst),
- (ins s32_0Imm:$src1, s8_0Imm:$src2),
- "$dst=combine(#$src1,#$src2)">;
+def TFRI64_V2_ext : InstHexagon<(outs DoubleRegs:$dst),
+ (ins s32_0Imm:$src1, s8_0Imm:$src2),
+ "$dst=combine(#$src1,#$src2)", [], "",
+ A2_combineii.Itinerary, TypeALU32_2op>, OpcodeHexagon;
// HI/LO Instructions
let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0,
hasNewValue = 1, opNewValue = 0 in
-class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp>
+class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp,
+ InstHexagon rootInst>
: InstHexagon<(outs IntRegs:$dst),
- (ins u16_0Imm:$imm_value),
- "$dst"#RegHalf#"=#$imm_value", [], "", ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>, OpcodeHexagon {
+ (ins u16_0Imm:$imm_value),
+ "$dst"#RegHalf#"=#$imm_value", [], "",
+ rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<5> dst;
bits<32> imm_value;
@@ -46,8 +58,8 @@ class REG_IMMED<string RegHalf, bit Rs, bits<3> MajOp, bit MinOp>
}
let isAsmParserOnly = 1 in {
- def LO : REG_IMMED<".l", 0b0, 0b001, 0b1>;
- def HI : REG_IMMED<".h", 0b0, 0b010, 0b1>;
+ def LO : REG_IMMED<".l", 0b0, 0b001, 0b1, A2_tfril>;
+ def HI : REG_IMMED<".h", 0b0, 0b010, 0b1, A2_tfrih>;
}
let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in {
@@ -59,11 +71,13 @@ let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in {
let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
isCodeGenOnly = 1 in
-def PS_true : SInst<(outs PredRegs:$dst), (ins), "", []>;
+def PS_true : InstHexagon<(outs PredRegs:$dst), (ins), "",
+ [(set I1:$dst, 1)], "", C2_orn.Itinerary, TypeCR>;
let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1,
isCodeGenOnly = 1 in
-def PS_false : SInst<(outs PredRegs:$dst), (ins), "", []>;
+def PS_false : InstHexagon<(outs PredRegs:$dst), (ins), "",
+ [(set I1:$dst, 0)], "", C2_andn.Itinerary, TypeCR>;
let Defs = [R29, R30], Uses = [R31, R30, R29], isPseudo = 1 in
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
@@ -90,10 +104,10 @@ def ENDLOOP1 : Endloop<(outs), (ins b30_2Imm:$offset),
let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
opExtendable = 0, hasSideEffects = 0 in
-class LOOP_iBase<string mnemonic, Operand brOp, bit mustExtend = 0>
- : CRInst<(outs), (ins brOp:$offset, u10_0Imm:$src2),
+class LOOP_iBase<string mnemonic, InstHexagon rootInst>
+ : InstHexagon <(outs), (ins b30_2Imm:$offset, u10_0Imm:$src2),
#mnemonic#"($offset,#$src2)",
- [], "" , CR_tc_3x_SLOT3> {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<9> offset;
bits<10> src2;
@@ -110,10 +124,10 @@ class LOOP_iBase<string mnemonic, Operand brOp, bit mustExtend = 0>
let isExtendable = 1, isExtentSigned = 1, opExtentBits = 9, opExtentAlign = 2,
opExtendable = 0, hasSideEffects = 0 in
-class LOOP_rBase<string mnemonic, Operand brOp, bit mustExtend = 0>
- : CRInst<(outs), (ins brOp:$offset, IntRegs:$src2),
+class LOOP_rBase<string mnemonic, InstHexagon rootInst>
+ : InstHexagon<(outs), (ins b30_2Imm:$offset, IntRegs:$src2),
#mnemonic#"($offset,$src2)",
- [], "" ,CR_tc_3x_SLOT3> {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<9> offset;
bits<5> src2;
@@ -126,27 +140,25 @@ class LOOP_rBase<string mnemonic, Operand brOp, bit mustExtend = 0>
let Inst{4-3} = offset{3-2};
}
-multiclass LOOP_ri<string mnemonic> {
- let isCodeGenOnly = 1, isExtended = 1, opExtendable = 0 in {
- def iext: LOOP_iBase<mnemonic, b30_2Imm, 1>;
- def rext: LOOP_rBase<mnemonic, b30_2Imm, 1>;
- }
+let Defs = [SA0, LC0, USR], isCodeGenOnly = 1, isExtended = 1,
+ opExtendable = 0 in {
+ def J2_loop0iext : LOOP_iBase<"loop0", J2_loop0i>;
+ def J2_loop1iext : LOOP_iBase<"loop1", J2_loop1i>;
}
-
-let Defs = [SA0, LC0, USR] in
-defm J2_loop0 : LOOP_ri<"loop0">;
-
// Interestingly only loop0's appear to set usr.lpcfg
-let Defs = [SA1, LC1] in
-defm J2_loop1 : LOOP_ri<"loop1">;
+let Defs = [SA1, LC1], isCodeGenOnly = 1, isExtended = 1, opExtendable = 0 in {
+ def J2_loop0rext : LOOP_rBase<"loop0", J2_loop0r>;
+ def J2_loop1rext : LOOP_rBase<"loop1", J2_loop1r>;
+}
let isCall = 1, hasSideEffects = 1, isPredicable = 0,
isExtended = 0, isExtendable = 1, opExtendable = 0,
isExtentSigned = 1, opExtentBits = 24, opExtentAlign = 2 in
class T_Call<string ExtStr>
- : JInst<(outs), (ins a30_2Imm:$dst),
- "call " # ExtStr # "$dst", [], "", J_tc_2early_SLOT23> {
+ : InstHexagon<(outs), (ins a30_2Imm:$dst),
+ "call " # ExtStr # "$dst", [], "", J2_call.Itinerary, TypeJ>,
+ OpcodeHexagon {
let BaseOpcode = "call";
bits<24> dst;
@@ -164,38 +176,24 @@ let isCodeGenOnly = 1, isCall = 1, hasSideEffects = 1,
Defs = [PC, R31, R6, R7, P0] in
def PS_call_stk : T_Call<"">;
-let isCall = 1, hasSideEffects = 1, cofMax1 = 1 in
-class JUMPR_MISC_CALLR<bit isPred, bit isPredNot,
- dag InputDag = (ins IntRegs:$Rs)>
- : JInst<(outs), InputDag,
- !if(isPred, !if(isPredNot, "if (!$Pu) callr $Rs",
- "if ($Pu) callr $Rs"),
- "callr $Rs"),
- [], "", J_tc_2early_SLOT2> {
+// Call, no return.
+let isCall = 1, hasSideEffects = 1, cofMax1 = 1, isCodeGenOnly = 1 in
+def PS_callr_nr: InstHexagon<(outs), (ins IntRegs:$Rs),
+ "callr $Rs", [], "", J2_callr.Itinerary, TypeJ>, OpcodeHexagon {
bits<5> Rs;
bits<2> Pu;
- let isPredicated = isPred;
- let isPredicatedFalse = isPredNot;
+ let isPredicatedFalse = 1;
let IClass = 0b0101;
- let Inst{27-25} = 0b000;
- let Inst{24-23} = !if (isPred, 0b10, 0b01);
- let Inst{22} = 0;
- let Inst{21} = isPredNot;
- let Inst{9-8} = !if (isPred, Pu, 0b00);
+ let Inst{27-21} = 0b0000101;
let Inst{20-16} = Rs;
-
}
-let isCodeGenOnly = 1 in {
- def PS_callr_nr : JUMPR_MISC_CALLR<0, 1>; // Call, no return.
-}
-
let isCall = 1, hasSideEffects = 1,
isExtended = 0, isExtendable = 1, opExtendable = 0, isCodeGenOnly = 1,
- BaseOpcode = "PS_call_nr", isExtentSigned = 1, opExtentAlign = 2,
- Itinerary = J_tc_2early_SLOT23 in
-class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops>
+ BaseOpcode = "PS_call_nr", isExtentSigned = 1, opExtentAlign = 2 in
+class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops,
+ InstrItinClass itin>
: Pseudo<(outs), iops, "">, PredRel {
bits<2> Pu;
bits<17> dst;
@@ -205,16 +203,18 @@ class Call_nr<bits<5> nbits, bit isPred, bit isFalse, dag iops>
let isPredicatedFalse = isFalse;
}
-def PS_call_nr : Call_nr<24, 0, 0, (ins s32_0Imm:$Ii)>;
-//def PS_call_nrt: Call_nr<17, 1, 0, (ins PredRegs:$Pu, s32_0Imm:$dst)>;
-//def PS_call_nrf: Call_nr<17, 1, 1, (ins PredRegs:$Pu, s32_0Imm:$dst)>;
+def PS_call_nr : Call_nr<24, 0, 0, (ins s32_0Imm:$Ii), J2_call.Itinerary>;
+//def PS_call_nrt: Call_nr<17, 1, 0, (ins PredRegs:$Pu, s32_0Imm:$dst),
+// J2_callt.Itinerary>;
+//def PS_call_nrf: Call_nr<17, 1, 1, (ins PredRegs:$Pu, s32_0Imm:$dst),
+// J2_callf.Itinerary>;
let isBranch = 1, isIndirectBranch = 1, isBarrier = 1, Defs = [PC],
isPredicable = 1, hasSideEffects = 0, InputType = "reg",
cofMax1 = 1 in
-class T_JMPr
+class T_JMPr <InstHexagon rootInst>
: InstHexagon<(outs), (ins IntRegs:$dst), "jumpr $dst", [],
- "", J_tc_2early_SLOT2, TypeJ>, OpcodeHexagon {
+ "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
bits<5> dst;
let IClass = 0b0101;
@@ -225,12 +225,12 @@ class T_JMPr
// A return through builtin_eh_return.
let isReturn = 1, isTerminator = 1, isBarrier = 1, hasSideEffects = 0,
isCodeGenOnly = 1, Defs = [PC], Uses = [R28], isPredicable = 0 in
-def EH_RETURN_JMPR : T_JMPr;
+def EH_RETURN_JMPR : T_JMPr<J2_jumpr>;
// Indirect tail-call.
let isPseudo = 1, isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0,
isTerminator = 1, isCodeGenOnly = 1 in
-def PS_tailcall_r : T_JMPr;
+def PS_tailcall_r : T_JMPr<J2_jumpr>;
//
// Direct tail-calls.
@@ -262,11 +262,11 @@ class JumpOpcStr<string Mnemonic, bit New, bit Taken> {
}
let isBranch = 1, isIndirectBranch = 1, Defs = [PC], isPredicated = 1,
hasSideEffects = 0, InputType = "reg", cofMax1 = 1 in
-class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>
+class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak, InstHexagon rootInst>
: InstHexagon<(outs), (ins PredRegs:$src, IntRegs:$dst),
CondStr<"$src", !if(PredNot,0,1), isPredNew>.S #
JumpOpcStr<"jumpr", isPredNew, isTak>.S # " $dst",
- [], "", J_tc_2early_SLOT2, TypeJ>, OpcodeHexagon {
+ [], "", rootInst.Itinerary, rootInst.Type>, OpcodeHexagon {
let isTaken = isTak;
let isPredicatedFalse = PredNot;
@@ -283,30 +283,25 @@ class T_JMPr_c <bit PredNot, bit isPredNew, bit isTak>
let Inst{11} = isPredNew;
let Inst{9-8} = src;
}
-multiclass JMPR_Pred<bit PredNot> {
- def NAME : T_JMPr_c<PredNot, 0, 0>; // not taken
- // Predicate new
- def NAME#newpt : T_JMPr_c<PredNot, 1, 1>; // taken
- def NAME#new : T_JMPr_c<PredNot, 1, 0>; // not taken
-}
-multiclass JMPR_base<string BaseOp> {
- let BaseOpcode = BaseOp in {
- def NAME : T_JMPr;
- defm t : JMPR_Pred<0>;
- defm f : JMPR_Pred<1>;
- }
+
+let isTerminator = 1, hasSideEffects = 0, isReturn = 1, isCodeGenOnly = 1,
+ isBarrier = 1, BaseOpcode = "JMPret" in {
+ def PS_jmpret : T_JMPr<J2_jumpr>, PredNewRel;
+ def PS_jmprett : T_JMPr_c<0, 0, 0, J2_jumprt>, PredNewRel;
+ def PS_jmpretf : T_JMPr_c<1, 0, 0, J2_jumprf>, PredNewRel;
+ def PS_jmprettnew : T_JMPr_c<0, 1, 0, J2_jumprtnew>, PredNewRel;
+ def PS_jmpretfnew : T_JMPr_c<1, 1, 0, J2_jumprfnew>, PredNewRel;
+ def PS_jmprettnewpt : T_JMPr_c<0, 1, 1, J2_jumprtnewpt>, PredNewRel;
+ def PS_jmpretfnewpt : T_JMPr_c<1, 1, 1, J2_jumprfnewpt>, PredNewRel;
}
-let isTerminator = 1, hasSideEffects = 0, isReturn = 1, isCodeGenOnly = 1, isBarrier = 1 in
-defm PS_jmpret : JMPR_base<"JMPret">, PredNewRel;
//defm V6_vtran2x2_map : HexagonMapping<(outs VectorRegs:$Vy32, VectorRegs:$Vx32), (ins VectorRegs:$Vx32in, IntRegs:$Rt32), "vtrans2x2(${Vy32},${Vx32},${Rt32})", (V6_vshuff VectorRegs:$Vy32, VectorRegs:$Vx32, VectorRegs:$Vx32in, IntRegs:$Rt32)>;
// The reason for the custom inserter is to record all ALLOCA instructions
// in MachineFunctionInfo.
-let Defs = [R29], isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 1 in
-def PS_alloca: InstHexagon<(outs IntRegs:$Rd),
- (ins IntRegs:$Rs, u32_0Imm:$A), "",
- [], "", ALU32_2op_tc_1_SLOT0123, TypeALU32_2op>;
+let Defs = [R29], hasSideEffects = 1 in
+def PS_alloca: Pseudo <(outs IntRegs:$Rd),
+ (ins IntRegs:$Rs, u32_0Imm:$A), "", []>;
// Load predicate.
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13,
@@ -322,35 +317,19 @@ def LDriw_mod : LDInst<(outs ModRegs:$dst),
(ins IntRegs:$addr, s32_0Imm:$off),
".error \"should not emit\"", []>;
-// Vector load
-let Predicates = [HasV60T, UseHVX] in
-let mayLoad = 1, hasSideEffects = 0 in
- class V6_LDInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VM_LD,
- IType type = TypeCVI_VM_LD>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
-
-// Vector store
-let Predicates = [HasV60T, UseHVX] in
-let mayStore = 1, hasSideEffects = 0 in
-class V6_STInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VM_ST,
- IType type = TypeCVI_VM_ST>
-: InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
let isCodeGenOnly = 1, isPseudo = 1 in
-def PS_pselect : ALU64_rr<(outs DoubleRegs:$Rd),
+def PS_pselect: InstHexagon<(outs DoubleRegs:$Rd),
(ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt),
- ".error \"should not emit\" ", []>;
+ ".error \"should not emit\" ", [], "", A2_tfrpt.Itinerary, TypeALU32_2op>;
let isBranch = 1, isBarrier = 1, Defs = [PC], hasSideEffects = 0,
isPredicable = 1,
isExtendable = 1, opExtendable = 0, isExtentSigned = 1,
opExtentBits = 24, opExtentAlign = 2, InputType = "imm" in
-class T_JMP<string ExtStr>
- : JInst_CJUMP_UCJUMP<(outs), (ins b30_2Imm:$dst),
- "jump " # ExtStr # "$dst",
- [], "", J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT> {
+class T_JMP: InstHexagon<(outs), (ins b30_2Imm:$dst),
+ "jump $dst",
+ [], "", J2_jump.Itinerary, TypeJ>, OpcodeHexagon {
bits<24> dst;
let IClass = 0b0101;
@@ -362,16 +341,16 @@ class T_JMP<string ExtStr>
// Restore registers and dealloc return function call.
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
Defs = [R29, R30, R31, PC], isPredicable = 0, isAsmParserOnly = 1 in {
- def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP;
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP;
let Defs = [R14, R15, R28, R29, R30, R31, PC] in {
- def RESTORE_DEALLOC_RET_JMP_V4_PIC : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_PIC : T_JMP;
let isExtended = 1, opExtendable = 0 in
- def RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC : T_JMP<"">;
+ def RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC : T_JMP;
}
}
@@ -416,33 +395,38 @@ let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in {
def SAVE_REGISTERS_CALL_V4STK_EXT_PIC : T_Call<"">, PredRel;
}
-// Vector load/store pseudos
+// Vector store pseudos
+let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
+ mayStore = 1, hasSideEffects = 0 in
+class STrivv_template<RegisterClass RC, InstHexagon rootInst>
+ : InstHexagon<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src),
+ "", [], "", rootInst.Itinerary, rootInst.Type>;
-let isPseudo = 1, isCodeGenOnly = 1 in
-class STrivv_template<RegisterClass RC>
- : V6_STInst<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src), "", []>;
-
-def PS_vstorerw_ai: STrivv_template<VecDblRegs>,
- Requires<[HasV60T,UseHVXSgl]>;
-def PS_vstorerwu_ai: STrivv_template<VecDblRegs>,
+def PS_vstorerw_ai: STrivv_template<VecDblRegs, V6_vS32b_ai>,
Requires<[HasV60T,UseHVXSgl]>;
-def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B>,
- Requires<[HasV60T,UseHVXDbl]>;
-def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B>,
+def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
+def PS_vstorerwu_ai: STrivv_template<VecDblRegs, V6_vS32Ub_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32Ub_ai_128B>,
+ Requires<[HasV60T,UseHVXDbl]>;
-let isPseudo = 1, isCodeGenOnly = 1 in
-class LDrivv_template<RegisterClass RC>
- : V6_LDInst<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off), "", []>;
+// Vector load pseudos
+let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
+ mayLoad = 1, hasSideEffects = 0 in
+class LDrivv_template<RegisterClass RC, InstHexagon rootInst>
+ : InstHexagon<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off),
+ "", [], "", rootInst.Itinerary, rootInst.Type>;
-def PS_vloadrw_ai: LDrivv_template<VecDblRegs>,
- Requires<[HasV60T,UseHVXSgl]>;
-def PS_vloadrwu_ai: LDrivv_template<VecDblRegs>,
+def PS_vloadrw_ai: LDrivv_template<VecDblRegs, V6_vL32b_ai>,
Requires<[HasV60T,UseHVXSgl]>;
-def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B>,
+def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
-def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B>,
+
+def PS_vloadrwu_ai: LDrivv_template<VecDblRegs, V6_vL32Ub_ai>,
+ Requires<[HasV60T,UseHVXSgl]>;
+def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32Ub_ai_128B>,
Requires<[HasV60T,UseHVXDbl]>;
// Store vector predicate pseudo.
@@ -469,25 +453,23 @@ let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
Requires<[HasV60T,UseHVXDbl]>;
}
-class VSELInst<dag outs, dag ins, string asmstr, list<dag> pattern = [],
- string cstr = "", InstrItinClass itin = CVI_VA_DV,
- IType type = TypeCVI_VA_DV>
- : InstHexagon<outs, ins, asmstr, pattern, cstr, itin, type>;
-
-let isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in {
- def PS_vselect: VSELInst<(outs VectorRegs:$dst),
- (ins PredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3), "", []>,
- Requires<[HasV60T,UseHVXSgl]>;
- def PS_vselect_128B: VSELInst<(outs VectorRegs128B:$dst),
- (ins PredRegs:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
- "", []>, Requires<[HasV60T,UseHVXDbl]>;
- def PS_wselect: VSELInst<(outs VecDblRegs:$dst),
- (ins PredRegs:$src1, VecDblRegs:$src2, VecDblRegs:$src3), "", []>,
- Requires<[HasV60T,UseHVXSgl]>;
- def PS_wselect_128B: VSELInst<(outs VecDblRegs128B:$dst),
- (ins PredRegs:$src1, VecDblRegs128B:$src2, VecDblRegs128B:$src3),
- "", []>, Requires<[HasV60T,UseHVXDbl]>;
-}
+let isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
+class VSELInst<dag outs, dag ins, InstHexagon rootInst>
+ : InstHexagon<outs, ins, "", [], "", rootInst.Itinerary, rootInst.Type>;
+
+def PS_vselect: VSELInst<(outs VectorRegs:$dst),
+ (ins PredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
+ V6_vcmov>, Requires<[HasV60T,UseHVXSgl]>;
+def PS_vselect_128B: VSELInst<(outs VectorRegs128B:$dst),
+ (ins PredRegs:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
+ V6_vcmov>, Requires<[HasV60T,UseHVXDbl]>;
+
+def PS_wselect: VSELInst<(outs VecDblRegs:$dst),
+ (ins PredRegs:$src1, VecDblRegs:$src2, VecDblRegs:$src3),
+ V6_vccombine>, Requires<[HasV60T,UseHVXSgl]>;
+def PS_wselect_128B: VSELInst<(outs VecDblRegs128B:$dst),
+ (ins PredRegs:$src1, VecDblRegs128B:$src2, VecDblRegs128B:$src3),
+ V6_vccombine>, Requires<[HasV60T,UseHVXDbl]>;
// Store predicate.
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,
@@ -504,8 +486,10 @@ def STriw_mod : STInst<(outs),
let isExtendable = 1, opExtendable = 1, opExtentBits = 6,
isAsmParserOnly = 1 in
-def TFRI64_V4 : ALU64_rr<(outs DoubleRegs:$dst), (ins u64_0Imm:$src1),
- "$dst = #$src1">;
+def TFRI64_V4 : InstHexagon<(outs DoubleRegs:$dst),
+ (ins u64_0Imm:$src1),
+ "$dst = #$src1", [], "",
+ A2_combineii.Itinerary, TypeALU32_2op>, OpcodeHexagon;
// Hexagon doesn't have a vector multiply with C semantics.
// Instead, generate a pseudo instruction that gets expaneded into two
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td
index 2519b7c40062..45dbb3a6d218 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.td
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.td
@@ -122,12 +122,6 @@ let Namespace = "Hexagon" in {
def P2 : Rp<2, "p2">, DwarfRegNum<[65]>;
def P3 : Rp<3, "p3">, DwarfRegNum<[66]>;
- // Modifier registers.
- // C6 and C7 can also be M0 and M1, but register names must be unique, even
- // if belonging to different register classes.
- def M0 : Mx<0, "m0">, DwarfRegNum<[72]>;
- def M1 : Mx<1, "m1">, DwarfRegNum<[73]>;
-
// Fake register to represent USR.OVF bit. Artihmetic/saturating instruc-
// tions modify this bit, and multiple such instructions are allowed in the
// same packet. We need to ignore output dependencies on this bit, but not
@@ -149,8 +143,8 @@ let Namespace = "Hexagon" in {
// When defining more Cn registers, make sure to explicitly mark them
// as reserved in HexagonRegisterInfo.cpp.
def C5: Rc<5, "c5", ["c5"]>, DwarfRegNum<[72]>;
- def C6: Rc<6, "c6", [], [M0]>, DwarfRegNum<[73]>;
- def C7: Rc<7, "c7", [], [M1]>, DwarfRegNum<[74]>;
+ def M0: Rc<6, "m0", ["c6"]>, DwarfRegNum<[73]>;
+ def M1: Rc<7, "m1", ["c7"]>, DwarfRegNum<[74]>;
// Define C8 separately and make it aliased with USR.
// The problem is that USR has subregisters (e.g. overflow). If USR was
// specified as a subregister of C9_8, it would imply that subreg_overflow
@@ -177,7 +171,7 @@ let Namespace = "Hexagon" in {
def C1_0: Rcc<0, "c1:0", [SA0, LC0], ["lc0:sa0"]>, DwarfRegNum<[67]>;
def C3_2: Rcc<2, "c3:2", [SA1, LC1], ["lc1:sa1"]>, DwarfRegNum<[69]>;
def C5_4: Rcc<4, "c5:4", [P3_0, C5]>, DwarfRegNum<[71]>;
- def C7_6: Rcc<6, "c7:6", [C6, C7], ["m1:0"]>, DwarfRegNum<[72]>;
+ def C7_6: Rcc<6, "c7:6", [M0, M1], ["m1:0"]>, DwarfRegNum<[72]>;
// Use C8 instead of USR as a subregister of C9_8.
def C9_8: Rcc<8, "c9:8", [C8, PC]>, DwarfRegNum<[74]>;
def C11_10: Rcc<10, "c11:10", [UGP, GP]>, DwarfRegNum<[76]>;
@@ -280,8 +274,8 @@ def ModRegs : RegisterClass<"Hexagon", [i32], 32, (add M0, M1)>;
let Size = 32, isAllocatable = 0 in
def CtrRegs : RegisterClass<"Hexagon", [i32], 32,
- (add LC0, SA0, LC1, SA1, P3_0, C5, C6, C7,
- C8, PC, UGP, GP, CS0, CS1, UPCYCLELO, UPCYCLEHI,
+ (add LC0, SA0, LC1, SA1, P3_0, C5, C8, PC, UGP, GP, CS0, CS1,
+ UPCYCLELO, UPCYCLEHI,
FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI, UTIMERLO, UTIMERHI,
M0, M1, USR)>;
diff --git a/lib/Target/Hexagon/HexagonSchedule.td b/lib/Target/Hexagon/HexagonSchedule.td
index 9b5fbea04d18..ffee03e72639 100644
--- a/lib/Target/Hexagon/HexagonSchedule.td
+++ b/lib/Target/Hexagon/HexagonSchedule.td
@@ -7,6 +7,55 @@
//
//===----------------------------------------------------------------------===//
+def Hex_FWD : Bypass;
+def HVX_FWD : Bypass;
+
+// Functional Units.
+def SLOT0 : FuncUnit;
+def SLOT1 : FuncUnit;
+def SLOT2 : FuncUnit;
+def SLOT3 : FuncUnit;
+// Endloop is a pseudo instruction that is encoded with 2 bits in a packet
+// rather than taking an execution slot. This special unit is needed
+// to schedule an ENDLOOP with 4 other instructions.
+def SLOT_ENDLOOP: FuncUnit;
+
+// CVI pipes from the "Hexagon Multimedia Co-Processor Extensions Arch Spec".
+def CVI_ST : FuncUnit;
+def CVI_XLANE : FuncUnit;
+def CVI_SHIFT : FuncUnit;
+def CVI_MPY0 : FuncUnit;
+def CVI_MPY1 : FuncUnit;
+def CVI_LD : FuncUnit;
+
+// Combined functional units.
+def CVI_XLSHF : FuncUnit;
+def CVI_MPY01 : FuncUnit;
+def CVI_ALL : FuncUnit;
+def CVI_ALL_NOMEM : FuncUnit;
+
+// Combined functional unit data.
+def HexagonComboFuncsV60 :
+ ComboFuncUnits<[
+ ComboFuncData<CVI_XLSHF , [CVI_XLANE, CVI_SHIFT]>,
+ ComboFuncData<CVI_MPY01 , [CVI_MPY0, CVI_MPY1]>,
+ ComboFuncData<CVI_ALL , [CVI_ST, CVI_XLANE, CVI_SHIFT,
+ CVI_MPY0, CVI_MPY1, CVI_LD]>,
+ ComboFuncData<CVI_ALL_NOMEM, [CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1]>
+ ]>;
+
+// Itinerary classes.
+def PSEUDO : InstrItinClass;
+def PSEUDOM : InstrItinClass;
+def DUPLEX : InstrItinClass;
+def tc_ENDLOOP : InstrItinClass;
+
+//===----------------------------------------------------------------------===//
+// Auto-generated itinerary classes
+//===----------------------------------------------------------------------===//
+include "HexagonDepIICScalar.td"
+include "HexagonDepIICHVX.td"
+
//===----------------------------------------------------------------------===//
// V4 Machine Info +
//===----------------------------------------------------------------------===//
@@ -20,9 +69,9 @@ include "HexagonScheduleV55.td"
// V60 Machine Info -
//===----------------------------------------------------------------------===//
-include "HexagonScheduleV60.td"
include "HexagonIICScalar.td"
include "HexagonIICHVX.td"
+include "HexagonScheduleV60.td"
//===----------------------------------------------------------------------===//
// V62 Machine Info +
diff --git a/lib/Target/Hexagon/HexagonScheduleV4.td b/lib/Target/Hexagon/HexagonScheduleV4.td
index 880cc0a02b6a..69b704a805b8 100644
--- a/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -7,200 +7,31 @@
//
//===----------------------------------------------------------------------===//
-// There are four SLOTS (four parallel pipelines) in Hexagon V4 machine.
-// This file describes that machine information.
-
-//
-// |===========|==================================================|
-// | PIPELINE | Instruction Classes |
-// |===========|==================================================|
-// | SLOT0 | LD ST ALU32 MEMOP NV SYSTEM |
-// |-----------|--------------------------------------------------|
-// | SLOT1 | LD ST ALU32 |
-// |-----------|--------------------------------------------------|
-// | SLOT2 | XTYPE ALU32 J JR |
-// |-----------|--------------------------------------------------|
-// | SLOT3 | XTYPE ALU32 J CR |
-// |===========|==================================================|
-
-// Functional Units.
-def SLOT0 : FuncUnit;
-def SLOT1 : FuncUnit;
-def SLOT2 : FuncUnit;
-def SLOT3 : FuncUnit;
-// Endloop is a pseudo instruction that is encoded with 2 bits in a packet
-// rather than taking an execution slot. This special unit is needed
-// to schedule an ENDLOOP with 4 other instructions.
-def SLOT_ENDLOOP: FuncUnit;
-
-// Itinerary classes.
-def PSEUDO : InstrItinClass;
-def PSEUDOM : InstrItinClass;
-// ALU64/M/S Instruction classes of V2 are collectively knownn as XTYPE in V4.
-def DUPLEX : InstrItinClass;
-def PREFIX : InstrItinClass;
-def COMPOUND_CJ_ARCHDEPSLOT : InstrItinClass;
-def COMPOUND : InstrItinClass;
+def LD_tc_ld_SLOT01 : InstrItinClass;
+def ST_tc_st_SLOT01 : InstrItinClass;
+
+class HexagonV4PseudoItin {
+ list<InstrItinData> V4PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>]>
+ ];
+}
-def ALU32_2op_tc_1_SLOT0123 : InstrItinClass;
-def ALU32_2op_tc_2early_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_2early_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_1_SLOT0123 : InstrItinClass;
-def ALU32_3op_tc_2_SLOT0123 : InstrItinClass;
-def ALU32_ADDI_tc_1_SLOT0123 : InstrItinClass;
-def ALU64_tc_1_SLOT23 : InstrItinClass;
-def ALU64_tc_2_SLOT23 : InstrItinClass;
-def ALU64_tc_2early_SLOT23 : InstrItinClass;
-def ALU64_tc_3x_SLOT23 : InstrItinClass;
-def CR_tc_2_SLOT3 : InstrItinClass;
-def CR_tc_2early_SLOT23 : InstrItinClass;
-def CR_tc_2early_SLOT3 : InstrItinClass;
-def CR_tc_3x_SLOT23 : InstrItinClass;
-def CR_tc_3x_SLOT3 : InstrItinClass;
-def J_tc_2early_SLOT23 : InstrItinClass;
-def J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT : InstrItinClass;
-def J_tc_2early_SLOT2 : InstrItinClass;
-def LD_tc_ld_SLOT01 : InstrItinClass;
-def LD_tc_ld_pi_SLOT01 : InstrItinClass;
-def LD_tc_ld_SLOT0 : InstrItinClass;
-def LD_tc_3or4stall_SLOT0 : InstrItinClass;
-def M_tc_2_SLOT23 : InstrItinClass;
-def M_tc_2_acc_SLOT23 : InstrItinClass;
-def M_tc_3_SLOT23 : InstrItinClass;
-def M_tc_1_SLOT23 : InstrItinClass;
-def M_tc_3x_SLOT23 : InstrItinClass;
-def M_tc_3x_acc_SLOT23 : InstrItinClass;
-def M_tc_3or4x_SLOT23 : InstrItinClass;
-def M_tc_3or4x_acc_SLOT23 : InstrItinClass;
-def ST_tc_st_SLOT01 : InstrItinClass;
-def ST_tc_st_pi_SLOT01 : InstrItinClass;
-def ST_tc_st_SLOT0 : InstrItinClass;
-def ST_tc_st_pi_SLOT0 : InstrItinClass;
-def ST_tc_ld_SLOT0 : InstrItinClass;
-def ST_tc_3stall_SLOT0 : InstrItinClass;
-def S_2op_tc_1_SLOT23 : InstrItinClass;
-def S_2op_tc_2_SLOT23 : InstrItinClass;
-def S_2op_tc_2early_SLOT23 : InstrItinClass;
-def S_2op_tc_3or4x_SLOT23 : InstrItinClass;
-def S_3op_tc_1_SLOT23 : InstrItinClass;
-def S_3op_tc_2_SLOT23 : InstrItinClass;
-def S_3op_tc_2early_SLOT23 : InstrItinClass;
-def S_3op_tc_3_SLOT23 : InstrItinClass;
-def S_3op_tc_3x_SLOT23 : InstrItinClass;
-def NCJ_tc_3or4stall_SLOT0 : InstrItinClass;
-def V2LDST_tc_ld_SLOT01 : InstrItinClass;
-def V2LDST_tc_st_SLOT0 : InstrItinClass;
-def V2LDST_tc_st_SLOT01 : InstrItinClass;
-def V4LDST_tc_ld_SLOT01 : InstrItinClass;
-def V4LDST_tc_st_SLOT0 : InstrItinClass;
-def V4LDST_tc_st_SLOT01 : InstrItinClass;
-def J_tc_2early_SLOT0123 : InstrItinClass;
-def EXTENDER_tc_1_SLOT0123 : InstrItinClass;
-def S_3op_tc_3stall_SLOT23 : InstrItinClass;
+def HexagonV4ItinList : DepScalarItinV4, HexagonV4PseudoItin {
+ list<InstrItinData> V4Itin_list = [
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>
+ ];
+ list<InstrItinData> ItinList =
+ !listconcat(V4Itin_list, DepScalarItinV4_list, V4PseudoItin_list);
+}
def HexagonItinerariesV4 :
- ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>]>,
-
- // Jump (conditional/unconditional/return etc)
- // CR
- InstrItinData<CR_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // J
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>]>,
-
- //Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<LD_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // Store
- // ST
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_st_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- // ST0
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3stall_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // SYS
- InstrItinData<ST_tc_3stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // Mem ops - MEM_V4
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
-
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
-
- // ENDLOOP
- InstrItinData<J_tc_2early_SLOT0123 , [InstrStage<1, [SLOT_ENDLOOP]>]>,
-
- // Extender/PREFIX
- InstrItinData<EXTENDER_tc_1_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>]>
- ]>;
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP],
+ [Hex_FWD], HexagonV4ItinList.ItinList>;
def HexagonModelV4 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/lib/Target/Hexagon/HexagonScheduleV55.td b/lib/Target/Hexagon/HexagonScheduleV55.td
index 06cbcb16abb7..ca738be5d6ef 100644
--- a/lib/Target/Hexagon/HexagonScheduleV55.td
+++ b/lib/Target/Hexagon/HexagonScheduleV55.td
@@ -1,4 +1,4 @@
-//=-HexagonScheduleV4.td - HexagonV4 Scheduling Definitions --*- tablegen -*-=//
+//=-HexagonScheduleV55.td - HexagonV55 Scheduling Definitions -*- tablegen -*=//
//
// The LLVM Compiler Infrastructure
//
@@ -7,190 +7,33 @@
//
//===----------------------------------------------------------------------===//
-// There are four SLOTS (four parallel pipelines) in Hexagon V4 machine.
-// This file describes that machine information.
-//
-// |===========|==================================================|
-// | PIPELINE | Instruction Classes |
-// |===========|==================================================|
-// | SLOT0 | LD ST ALU32 MEMOP NV SYSTEM |
-// |-----------|--------------------------------------------------|
-// | SLOT1 | LD ST ALU32 |
-// |-----------|--------------------------------------------------|
-// | SLOT2 | XTYPE ALU32 J JR |
-// |-----------|--------------------------------------------------|
-// | SLOT3 | XTYPE ALU32 J CR |
-// |===========|==================================================|
+class HexagonV55PseudoItin {
+ list<InstrItinData> V55PseudoItin_list = [
+ InstrItinData<PSEUDO, [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
+ [1, 1, 1]>,
+ InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
+ InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>,
+ InstrItinData<DUPLEX, [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
+ InstrItinData<tc_ENDLOOP, [InstrStage<1, [SLOT_ENDLOOP]>], [2]>
+ ];
+}
-def CJ_tc_1_SLOT23 : InstrItinClass;
-def CJ_tc_2early_SLOT23 : InstrItinClass;
-def COPROC_VMEM_vtc_long_SLOT01 : InstrItinClass;
-def COPROC_VX_vtc_long_SLOT23 : InstrItinClass;
-def COPROC_VX_vtc_SLOT23 : InstrItinClass;
-def J_tc_3stall_SLOT2 : InstrItinClass;
-def MAPPING_tc_1_SLOT0123 : InstrItinClass;
-def M_tc_3stall_SLOT23 : InstrItinClass;
+def HexagonV55ItinList : DepScalarItinV55,
+ HexagonV55PseudoItin {
+ list<InstrItinData> V55Itin_list = [
+ InstrItinData<LD_tc_ld_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>], [2, 1]>,
+ InstrItinData<ST_tc_st_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
+ [1, 1, 1]>
+ ];
+ list<InstrItinData> ItinList =
+ !listconcat(V55Itin_list, DepScalarItinV55_list,
+ V55PseudoItin_list);
+}
def HexagonItinerariesV55 :
- ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [2, 1, 1]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<1, [SLOT3]>], [2, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<1, [SLOT3]>], [3, 1, 1]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1, 1]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT,
- [InstrStage<1, [SLOT2, SLOT3]>], [2, 1, 1, 1]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<1, [SLOT2]>], [2, 1, 1]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<1, [SLOT2]>], [3, 1, 1]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>], [1, 1, 1]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1, 1]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<1, [SLOT0]>], [2, 1, 1]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [2, 1, 1]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>],
- [3, 1, 1]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<1, [SLOT0]>],
- [3, 1, 1, 1]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [2, 1, 1, 1]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>],
- [1, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [3, 1, 1, 1]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>],
- [1, 1, 1, 1]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<1, [SLOT_ENDLOOP]>],
- [2]>,
-
- // Vector
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<1, [SLOT0, SLOT1]>], [2, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23 ,
- [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
- InstrItinData<COPROC_VX_vtc_SLOT23 ,
- [InstrStage<1, [SLOT2, SLOT3]>], [3, 1, 1, 1]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1, 1]>,
-
- // Misc
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>], [1, 1, 1]>,
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>],
- [1, 1, 1]>,
- InstrItinData<PSEUDOM, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>
- ]>;
+ ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP],
+ [Hex_FWD], HexagonV55ItinList.ItinList>;
def HexagonModelV55 : SchedMachineModel {
// Max issue per cycle == bundle width.
@@ -201,5 +44,5 @@ def HexagonModelV55 : SchedMachineModel {
}
//===----------------------------------------------------------------------===//
-// Hexagon V4 Resource Definitions -
+// Hexagon V55 Resource Definitions -
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonScheduleV60.td b/lib/Target/Hexagon/HexagonScheduleV60.td
index 63784710f52b..a2544c92a72c 100644
--- a/lib/Target/Hexagon/HexagonScheduleV60.td
+++ b/lib/Target/Hexagon/HexagonScheduleV60.td
@@ -7,61 +7,6 @@
//
//===----------------------------------------------------------------------===//
-// CVI pipes from the "Hexagon Multimedia Co-Processor Extensions Arch Spec".
-def CVI_ST : FuncUnit;
-def CVI_XLANE : FuncUnit;
-def CVI_SHIFT : FuncUnit;
-def CVI_MPY0 : FuncUnit;
-def CVI_MPY1 : FuncUnit;
-def CVI_LD : FuncUnit;
-
-// Combined functional units.
-def CVI_XLSHF : FuncUnit;
-def CVI_MPY01 : FuncUnit;
-def CVI_ALL : FuncUnit;
-def CVI_XLMPY0 : FuncUnit;
-def CVI_SHFMPY1: FuncUnit;
-
-// Combined functional unit data.
-def HexagonComboFuncsV60 :
- ComboFuncUnits<[
- ComboFuncData<CVI_XLSHF , [CVI_XLANE, CVI_SHIFT]>,
- ComboFuncData<CVI_MPY01 , [CVI_MPY0, CVI_MPY1]>,
- ComboFuncData<CVI_ALL , [CVI_ST, CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1, CVI_LD]>,
- ComboFuncData<CVI_XLMPY0 , [CVI_XLANE, CVI_MPY0]>,
- ComboFuncData<CVI_SHFMPY1 , [CVI_SHIFT, CVI_MPY1]>
- ]>;
-
-// Note: When adding additional vector scheduling classes, add the
-// corresponding methods to the class HexagonInstrInfo.
-def CVI_VA : InstrItinClass;
-def CVI_VA_DV : InstrItinClass;
-def CVI_VX_LONG : InstrItinClass;
-def CVI_VX_LATE : InstrItinClass;
-def CVI_VX : InstrItinClass;
-def CVI_VX_DV_LONG : InstrItinClass;
-def CVI_VX_DV : InstrItinClass;
-def CVI_VX_DV_SLOT2 : InstrItinClass;
-def CVI_VX_DV_SLOT2_LONG_EARLY : InstrItinClass;
-def CVI_VP : InstrItinClass;
-def CVI_VP_LONG : InstrItinClass;
-def CVI_VP_VS_EARLY : InstrItinClass;
-def CVI_VP_VS_LONG_EARLY : InstrItinClass;
-def CVI_VP_VS_LONG : InstrItinClass;
-def CVI_VP_VS : InstrItinClass;
-def CVI_VP_DV : InstrItinClass;
-def CVI_VS : InstrItinClass;
-def CVI_VINLANESAT : InstrItinClass;
-def CVI_VM_LD : InstrItinClass;
-def CVI_VM_TMP_LD : InstrItinClass;
-def CVI_VM_CUR_LD : InstrItinClass;
-def CVI_VM_VP_LDU : InstrItinClass;
-def CVI_VM_ST : InstrItinClass;
-def CVI_VM_NEW_ST : InstrItinClass;
-def CVI_VM_STU : InstrItinClass;
-def CVI_HIST : InstrItinClass;
-def CVI_VA_EXT : InstrItinClass;
// There are four SLOTS (four parallel pipelines) in Hexagon V60 machine.
// This file describes that machine information.
@@ -108,196 +53,20 @@ def CVI_VA_EXT : InstrItinClass;
// S0123| CVI_VA_EXT Extract |
// |=====================================================================|
+def HexagonV60ItinList : DepScalarItinV60, ScalarItin,
+ DepHVXItinV60,
+ HVXItin, PseudoItin {
+ list<InstrItinData> ItinList =
+ !listconcat(DepScalarItinV60_list, ScalarItin_list,
+ DepHVXItinV60_list, HVXItin_list, PseudoItin_list);
+}
+
def HexagonItinerariesV60 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP,
CVI_ST, CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1,
- CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL], [], [
- // ALU32
- InstrItinData<ALU32_2op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_2op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2_SLOT0123 ,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_3op_tc_2early_SLOT0123,
- [InstrStage<2, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<ALU32_ADDI_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // ALU64
- InstrItinData<ALU64_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<ALU64_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // CR -> System
- InstrItinData<CR_tc_2_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_2early_SLOT3 , [InstrStage<2, [SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT3 , [InstrStage<3, [SLOT3]>]>,
-
- // Jump (conditional/unconditional/return etc)
- InstrItinData<CR_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<CR_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<CJ_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<J_tc_2early_CJUMP_UCJUMP_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // JR
- InstrItinData<J_tc_2early_SLOT2 , [InstrStage<2, [SLOT2]>]>,
- InstrItinData<J_tc_3stall_SLOT2 , [InstrStage<3, [SLOT2]>]>,
-
- // Extender
- InstrItinData<EXTENDER_tc_1_SLOT0123, [InstrStage<1,
- [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // Load
- InstrItinData<LD_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_ld_pi_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<LD_tc_3or4stall_SLOT0, [InstrStage<4, [SLOT0]>]>,
- InstrItinData<LD_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
-
- // M
- InstrItinData<M_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_2_acc_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3x_acc_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3or4x_acc_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<M_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // Store
- InstrItinData<ST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_st_pi_SLOT01, [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<ST_tc_3stall_SLOT0, [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_ld_SLOT0 , [InstrStage<3, [SLOT0]>]>,
- InstrItinData<ST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<ST_tc_st_pi_SLOT0 , [InstrStage<1, [SLOT0]>]>,
-
- // S
- InstrItinData<S_2op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_2op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- // The S_2op_tc_3x_SLOT23 slots are 4 cycles on v60.
- InstrItinData<S_2op_tc_3or4x_SLOT23 , [InstrStage<4, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_1_SLOT23 , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2_SLOT23 , [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_2early_SLOT23, [InstrStage<2, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3stall_SLOT23, [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<S_3op_tc_3x_SLOT23 , [InstrStage<3, [SLOT2, SLOT3]>]>,
-
- // New Value Compare Jump
- InstrItinData<NCJ_tc_3or4stall_SLOT0, [InstrStage<4, [SLOT0]>]>,
-
- // Mem ops
- InstrItinData<V2LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V2LDST_tc_ld_SLOT01 , [InstrStage<2, [SLOT0, SLOT1]>]>,
- InstrItinData<V2LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT0 , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<V4LDST_tc_ld_SLOT01 , [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<V4LDST_tc_st_SLOT01 , [InstrStage<1, [SLOT0, SLOT1]>]>,
-
- // Endloop
- InstrItinData<J_tc_2early_SLOT0123, [InstrStage<2, [SLOT_ENDLOOP]>]>,
-
- // Vector
- InstrItinData<COPROC_VMEM_vtc_long_SLOT01,
- [InstrStage<3, [SLOT0, SLOT1]>]>,
- InstrItinData<COPROC_VX_vtc_long_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<COPROC_VX_vtc_SLOT23 ,
- [InstrStage<3, [SLOT2, SLOT3]>]>,
- InstrItinData<MAPPING_tc_1_SLOT0123 ,
- [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
-
- // Duplex and Compound
- InstrItinData<DUPLEX , [InstrStage<1, [SLOT0]>]>,
- InstrItinData<COMPOUND_CJ_ARCHDEPSLOT , [InstrStage<1, [SLOT2, SLOT3]>]>,
- InstrItinData<COMPOUND , [InstrStage<1, [SLOT2, SLOT3]>]>,
- // Misc
- InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
- InstrItinData<PSEUDOM , [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [SLOT2, SLOT3]>]>,
-
- // Latest CVI spec definitions.
- InstrItinData<CVI_VA,[InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE,CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VA_DV,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF, CVI_MPY01]>]>,
- InstrItinData<CVI_VX_LONG, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX_LATE, [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX,[InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VX_DV_LONG,
- [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VX_DV,
- [InstrStage<1, [SLOT2, SLOT3], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VX_DV_SLOT2,
- [InstrStage<1, [SLOT2], 0>,
- InstrStage<1, [CVI_MPY01]>]>,
- InstrItinData<CVI_VP, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VP_LONG, [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VP_VS_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS_LONG,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_VS_LONG_EARLY,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VP_DV , [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_XLSHF]>]>,
- InstrItinData<CVI_VS,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>]>,
- InstrItinData<CVI_VINLANESAT,
- [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_SHIFT]>]>,
- InstrItinData<CVI_VM_LD , [InstrStage<1, [SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_TMP_LD,[InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD]>]>,
- InstrItinData<CVI_VM_CUR_LD,[InstrStage<1,[SLOT0, SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_VP_LDU,[InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_LD], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_VM_ST , [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE, CVI_SHIFT,
- CVI_MPY0, CVI_MPY1]>]>,
- InstrItinData<CVI_VM_NEW_ST,[InstrStage<1,[SLOT0], 0>,
- InstrStage<1, [CVI_ST]>]>,
- InstrItinData<CVI_VM_STU , [InstrStage<1, [SLOT0], 0>,
- InstrStage<1, [SLOT1], 0>,
- InstrStage<1, [CVI_ST], 0>,
- InstrStage<1, [CVI_XLANE]>]>,
- InstrItinData<CVI_HIST , [InstrStage<1, [SLOT0,SLOT1,SLOT2,SLOT3], 0>,
- InstrStage<1, [CVI_ALL]>]>
- ]>;
+ CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL,
+ CVI_ALL_NOMEM],
+ [Hex_FWD, HVX_FWD], HexagonV60ItinList.ItinList>;
def HexagonModelV60 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/lib/Target/Hexagon/HexagonScheduleV62.td b/lib/Target/Hexagon/HexagonScheduleV62.td
index 0758788a600b..a0a8595f185f 100644
--- a/lib/Target/Hexagon/HexagonScheduleV62.td
+++ b/lib/Target/Hexagon/HexagonScheduleV62.td
@@ -6,115 +6,23 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
+// ScalarItin contains some old itineraries still used by a
+// handful of instructions. Hopefully, we will be able to get rid of them soon.
-// V62 follows the same schedule as V60 with following exceptions:
-// Following instructions are permissible on any slot on V62:
-// V4_J4_cmpeq_fp0_jump_nt
-// V4_J4_cmpeq_fp0_jump_t
-// V4_J4_cmpeq_fp1_jump_nt
-// V4_J4_cmpeq_fp1_jump_t
-// V4_J4_cmpeq_tp0_jump_nt
-// V4_J4_cmpeq_tp0_jump_t
-// V4_J4_cmpeq_tp1_jump_nt
-// V4_J4_cmpeq_tp1_jump_t
-// V4_J4_cmpeqi_fp0_jump_nt
-// V4_J4_cmpeqi_fp0_jump_t
-// V4_J4_cmpeqi_fp1_jump_nt
-// V4_J4_cmpeqi_fp1_jump_t
-// V4_J4_cmpeqi_tp0_jump_nt
-// V4_J4_cmpeqi_tp0_jump_t
-// V4_J4_cmpeqi_tp1_jump_nt
-// V4_J4_cmpeqi_tp1_jump_t
-// V4_J4_cmpeqn1_fp0_jump_nt
-// V4_J4_cmpeqn1_fp0_jump_t
-// V4_J4_cmpeqn1_fp1_jump_nt
-// V4_J4_cmpeqn1_fp1_jump_t
-// V4_J4_cmpeqn1_tp0_jump_nt
-// V4_J4_cmpeqn1_tp0_jump_t
-// V4_J4_cmpeqn1_tp1_jump_nt
-// V4_J4_cmpeqn1_tp1_jump_t
-// V4_J4_cmpgt_fp0_jump_nt
-// V4_J4_cmpgt_fp0_jump_t
-// V4_J4_cmpgt_fp1_jump_nt
-// V4_J4_cmpgt_fp1_jump_t
-// V4_J4_cmpgt_tp0_jump_nt
-// V4_J4_cmpgt_tp0_jump_t
-// V4_J4_cmpgt_tp1_jump_nt
-// V4_J4_cmpgt_tp1_jump_t
-// V4_J4_cmpgti_fp0_jump_nt
-// V4_J4_cmpgti_fp0_jump_t
-// V4_J4_cmpgti_fp1_jump_nt
-// V4_J4_cmpgti_fp1_jump_t
-// V4_J4_cmpgti_tp0_jump_nt
-// V4_J4_cmpgti_tp0_jump_t
-// V4_J4_cmpgti_tp1_jump_nt
-// V4_J4_cmpgti_tp1_jump_t
-// V4_J4_cmpgtn1_fp0_jump_nt
-// V4_J4_cmpgtn1_fp0_jump_t
-// V4_J4_cmpgtn1_fp1_jump_nt
-// V4_J4_cmpgtn1_fp1_jump_t
-// V4_J4_cmpgtn1_tp0_jump_nt
-// V4_J4_cmpgtn1_tp0_jump_t
-// V4_J4_cmpgtn1_tp1_jump_nt
-// V4_J4_cmpgtn1_tp1_jump_t
-// V4_J4_cmpgtu_fp0_jump_nt
-// V4_J4_cmpgtu_fp0_jump_t
-// V4_J4_cmpgtu_fp1_jump_nt
-// V4_J4_cmpgtu_fp1_jump_t
-// V4_J4_cmpgtu_tp0_jump_nt
-// V4_J4_cmpgtu_tp0_jump_t
-// V4_J4_cmpgtu_tp1_jump_nt
-// V4_J4_cmpgtu_tp1_jump_t
-// V4_J4_cmpgtui_fp0_jump_nt
-// V4_J4_cmpgtui_fp0_jump_t
-// V4_J4_cmpgtui_fp1_jump_nt
-// V4_J4_cmpgtui_fp1_jump_t
-// V4_J4_cmpgtui_tp0_jump_nt
-// V4_J4_cmpgtui_tp0_jump_t
-// V4_J4_cmpgtui_tp1_jump_nt
-// V4_J4_cmpgtui_tp1_jump_t
-// V4_J4_tstbit0_fp0_jump_nt
-// V4_J4_tstbit0_fp0_jump_t
-// V4_J4_tstbit0_fp1_jump_nt
-// V4_J4_tstbit0_fp1_jump_t
-// V4_J4_tstbit0_tp0_jump_nt
-// V4_J4_tstbit0_tp0_jump_t
-// V4_J4_tstbit0_tp1_jump_nt
-// V4_J4_tstbit0_tp1_jump_t
-// JMP
-// JMPEXT
-// JMPEXT_f
-// JMPEXT_fnew_nt
-// JMPEXT_fnew_t
-// JMPEXT_t
-// JMPEXT_tnew_nt
-// JMPEXT_tnew_t
-// JMPNOTEXT
-// JMPNOTEXT_f
-// JMPNOTEXT_fnew_nt
-// JMPNOTEXT_fnew_t
-// JMPNOTEXT_t
-// JMPNOTEXT_tnew_nt
-// JMPNOTEXT_tnew_t
-// JMP_f
-// JMP_fnew_nt
-// JMP_fnew_t
-// JMP_t
-// JMP_tnew_nt
-// JMP_tnew_t
-// RESTORE_DEALLOC_RET_JMP_V4
-// RESTORE_DEALLOC_RET_JMP_V4_EXT
-
-def HexagonV62ItinList : ScalarItin, HVXV62Itin {
+def HexagonV62ItinList : DepScalarItinV62, ScalarItin,
+ DepHVXItinV62, HVXItin, PseudoItin {
list<InstrItinData> ItinList =
- !listconcat(ScalarItin_list, HVXV62Itin_list);
+ !listconcat(DepScalarItinV62_list, ScalarItin_list,
+ DepHVXItinV62_list, HVXItin_list, PseudoItin_list);
}
def HexagonItinerariesV62 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP,
CVI_ST, CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1,
- CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL],
- [], HexagonV62ItinList.ItinList>;
+ CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL,
+ CVI_ALL_NOMEM],
+ [Hex_FWD, HVX_FWD], HexagonV62ItinList.ItinList>;
def HexagonModelV62 : SchedMachineModel {
// Max issue per cycle == bundle width.
diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp
index 033b93fc910a..8851a23ae8ac 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -73,6 +73,10 @@ static cl::opt<bool> OverrideLongCalls("hexagon-long-calls",
cl::Hidden, cl::ZeroOrMore, cl::init(false),
cl::desc("If present, forces/disables the use of long calls"));
+static cl::opt<bool> EnablePredicatedCalls("hexagon-pred-calls",
+ cl::Hidden, cl::ZeroOrMore, cl::init(false),
+ cl::desc("Consider calls to be predicable"));
+
void HexagonSubtarget::initializeEnvironment() {
UseMemOps = false;
ModeIEEERndNear = false;
@@ -139,6 +143,59 @@ HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
UseBSBScheduling = hasV60TOps() && EnableBSBSched;
}
+/// \brief Perform target specific adjustments to the latency of a schedule
+/// dependency.
+void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
+ SDep &Dep) const {
+ MachineInstr *SrcInst = Src->getInstr();
+ MachineInstr *DstInst = Dst->getInstr();
+ if (!Src->isInstr() || !Dst->isInstr())
+ return;
+
+ const HexagonInstrInfo *QII = getInstrInfo();
+
+ // Instructions with .new operands have zero latency.
+ SmallSet<SUnit *, 4> ExclSrc;
+ SmallSet<SUnit *, 4> ExclDst;
+ if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
+ isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ if (!hasV60TOps())
+ return;
+
+ // If it's a REG_SEQUENCE, use its destination instruction to determine
+ // the correct latency.
+ if (DstInst->isRegSequence() && Dst->NumSuccs == 1) {
+ unsigned RSeqReg = DstInst->getOperand(0).getReg();
+ MachineInstr *RSeqDst = Dst->Succs[0].getSUnit()->getInstr();
+ unsigned UseIdx = -1;
+ for (unsigned OpNum = 0; OpNum < RSeqDst->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = RSeqDst->getOperand(OpNum);
+ if (MO.isReg() && MO.getReg() && MO.isUse() && MO.getReg() == RSeqReg) {
+ UseIdx = OpNum;
+ break;
+ }
+ }
+ unsigned RSeqLatency = (InstrInfo.getOperandLatency(&InstrItins, *SrcInst,
+ 0, *RSeqDst, UseIdx));
+ Dep.setLatency(RSeqLatency);
+ }
+
+ // Try to schedule uses near definitions to generate .cur.
+ ExclSrc.clear();
+ ExclDst.clear();
+ if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
+ isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
+ Dep.setLatency(0);
+ return;
+ }
+
+ updateLatency(*SrcInst, *DstInst, Dep);
+}
+
void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
for (auto &SU : DAG->SUnits) {
@@ -154,19 +211,19 @@ void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
for (auto &SU : DAG->SUnits) {
// Update the latency of chain edges between v60 vector load or store
- // instructions to be 1. These instructions cannot be scheduled in the
+ // instructions to be 1. These instruction cannot be scheduled in the
// same packet.
MachineInstr &MI1 = *SU.getInstr();
auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
bool IsStoreMI1 = MI1.mayStore();
bool IsLoadMI1 = MI1.mayLoad();
- if (!QII->isV60VectorInstruction(MI1) || !(IsStoreMI1 || IsLoadMI1))
+ if (!QII->isHVXVec(MI1) || !(IsStoreMI1 || IsLoadMI1))
continue;
for (auto &SI : SU.Succs) {
if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
continue;
MachineInstr &MI2 = *SI.getSUnit()->getInstr();
- if (!QII->isV60VectorInstruction(MI2))
+ if (!QII->isHVXVec(MI2))
continue;
if ((IsStoreMI1 && MI2.mayStore()) || (IsLoadMI1 && MI2.mayLoad())) {
SI.setLatency(1);
@@ -204,69 +261,99 @@ bool HexagonSubtarget::enableMachineScheduler() const {
return true;
}
-bool HexagonSubtarget::enableSubRegLiveness() const {
- return EnableSubregLiveness;
+bool HexagonSubtarget::usePredicatedCalls() const {
+ return EnablePredicatedCalls;
}
-// This helper function is responsible for increasing the latency only.
void HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
MachineInstr &DstInst, SDep &Dep) const {
+ if (Dep.isArtificial()) {
+ Dep.setLatency(1);
+ return;
+ }
+
if (!hasV60TOps())
return;
auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());
- if (EnableVecFrwdSched && QII.addLatencyToSchedule(SrcInst, DstInst)) {
- // Vec frwd scheduling.
- Dep.setLatency(Dep.getLatency() + 1);
- } else if (useBSBScheduling() &&
- QII.isLateInstrFeedsEarlyInstr(SrcInst, DstInst)) {
- // BSB scheduling.
- Dep.setLatency(Dep.getLatency() + 1);
- } else if (EnableTCLatencySched) {
- // TClass latency scheduling.
- // Check if SrcInst produces in 2C an operand of DstInst taken in stage 2B.
- if (QII.isTC1(SrcInst) || QII.isTC2(SrcInst))
- if (!QII.isTC1(DstInst) && !QII.isTC2(DstInst))
- Dep.setLatency(Dep.getLatency() + 1);
- }
+ // BSB scheduling.
+ if (QII.isHVXVec(SrcInst) || useBSBScheduling())
+ Dep.setLatency((Dep.getLatency() + 1) >> 1);
}
-/// If the SUnit has a zero latency edge, return the other SUnit.
-static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
- for (auto &I : Deps)
- if (I.isAssignedRegDep() && I.getLatency() == 0 &&
- !I.getSUnit()->getInstr()->isPseudo())
- return I.getSUnit();
- return nullptr;
+void HexagonSubtarget::restoreLatency(SUnit *Src, SUnit *Dst) const {
+ MachineInstr *SrcI = Src->getInstr();
+ for (auto &I : Src->Succs) {
+ if (!I.isAssignedRegDep() || I.getSUnit() != Dst)
+ continue;
+ unsigned DepR = I.getReg();
+ int DefIdx = -1;
+ for (unsigned OpNum = 0; OpNum < SrcI->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = SrcI->getOperand(OpNum);
+ if (MO.isReg() && MO.isDef() && MO.getReg() == DepR)
+ DefIdx = OpNum;
+ }
+ assert(DefIdx >= 0 && "Def Reg not found in Src MI");
+ MachineInstr *DstI = Dst->getInstr();
+ for (unsigned OpNum = 0; OpNum < DstI->getNumOperands(); OpNum++) {
+ const MachineOperand &MO = DstI->getOperand(OpNum);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == DepR) {
+ int Latency = (InstrInfo.getOperandLatency(&InstrItins, *SrcI,
+ DefIdx, *DstI, OpNum));
+
+ // For some instructions (ex: COPY), we might end up with < 0 latency
+ // as they don't have any Itinerary class associated with them.
+ if (Latency <= 0)
+ Latency = 1;
+
+ I.setLatency(Latency);
+ updateLatency(*SrcI, *DstI, I);
+ }
+ }
+
+ // Update the latency of opposite edge too.
+ for (auto &J : Dst->Preds) {
+ if (J.getSUnit() != Src)
+ continue;
+ J.setLatency(I.getLatency());
+ }
+ }
}
/// Change the latency between the two SUnits.
-void HexagonSubtarget::changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps,
- SUnit *Dst, unsigned Lat) const {
- MachineInstr &SrcI = *Src->getInstr();
- for (auto &I : Deps) {
+void HexagonSubtarget::changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat)
+ const {
+ for (auto &I : Src->Succs) {
if (I.getSUnit() != Dst)
continue;
+ SDep T = I;
I.setLatency(Lat);
- SUnit *UpdateDst = I.getSUnit();
- updateLatency(SrcI, *UpdateDst->getInstr(), I);
+
// Update the latency of opposite edge too.
- for (auto &PI : UpdateDst->Preds) {
- if (PI.getSUnit() != Src || !PI.isAssignedRegDep())
- continue;
- PI.setLatency(Lat);
- updateLatency(SrcI, *UpdateDst->getInstr(), PI);
- }
+ T.setSUnit(Src);
+ auto F = std::find(Dst->Preds.begin(), Dst->Preds.end(), T);
+ assert(F != Dst->Preds.end());
+ F->setLatency(I.getLatency());
}
}
+/// If the SUnit has a zero latency edge, return the other SUnit.
+static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
+ for (auto &I : Deps)
+ if (I.isAssignedRegDep() && I.getLatency() == 0 &&
+ !I.getSUnit()->getInstr()->isPseudo())
+ return I.getSUnit();
+ return nullptr;
+}
+
// Return true if these are the best two instructions to schedule
// together with a zero latency. Only one dependence should have a zero
// latency. If there are multiple choices, choose the best, and change
-// ther others, if needed.
+// the others, if needed.
bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
- const HexagonInstrInfo *TII) const {
+ const HexagonInstrInfo *TII, SmallSet<SUnit*, 4> &ExclSrc,
+ SmallSet<SUnit*, 4> &ExclDst) const {
MachineInstr &SrcInst = *Src->getInstr();
MachineInstr &DstInst = *Dst->getInstr();
@@ -277,6 +364,16 @@ bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
if (SrcInst.isPHI() || DstInst.isPHI())
return false;
+ if (!TII->isToBeScheduledASAP(SrcInst, DstInst) &&
+ !TII->canExecuteInBundle(SrcInst, DstInst))
+ return false;
+
+ // The architecture doesn't allow three dependent instructions in the same
+ // packet. So, if the destination has a zero latency successor, then it's
+ // not a candidate for a zero latency predecessor.
+ if (getZeroLatency(Dst, Dst->Succs) != nullptr)
+ return false;
+
// Check if the Dst instruction is the best candidate first.
SUnit *Best = nullptr;
SUnit *DstBest = nullptr;
@@ -290,98 +387,53 @@ bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
if (Best != Dst)
return false;
- // The caller frequents adds the same dependence twice. If so, then
+ // The caller frequently adds the same dependence twice. If so, then
// return true for this case too.
- if (Src == SrcBest && Dst == DstBest)
+ if ((Src == SrcBest && Dst == DstBest ) ||
+ (SrcBest == nullptr && Dst == DstBest) ||
+ (Src == SrcBest && Dst == nullptr))
return true;
// Reassign the latency for the previous bests, which requires setting
// the dependence edge in both directions.
- if (SrcBest != nullptr)
- changeLatency(SrcBest, SrcBest->Succs, Dst, 1);
- if (DstBest != nullptr)
- changeLatency(Src, Src->Succs, DstBest, 1);
- // If there is an edge from SrcBest to DstBst, then try to change that
- // to 0 now.
- if (SrcBest && DstBest)
- changeLatency(SrcBest, SrcBest->Succs, DstBest, 0);
-
- return true;
-}
-
-// Update the latency of a Phi when the Phi bridges two instructions that
-// require a multi-cycle latency.
-void HexagonSubtarget::changePhiLatency(MachineInstr &SrcInst, SUnit *Dst,
- SDep &Dep) const {
- if (!SrcInst.isPHI() || Dst->NumPreds == 0 || Dep.getLatency() != 0)
- return;
-
- for (const SDep &PI : Dst->Preds) {
- if (PI.getLatency() != 0)
- continue;
- Dep.setLatency(2);
- break;
- }
-}
-
-/// \brief Perform target specific adjustments to the latency of a schedule
-/// dependency.
-void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
- SDep &Dep) const {
- MachineInstr *SrcInst = Src->getInstr();
- MachineInstr *DstInst = Dst->getInstr();
- if (!Src->isInstr() || !Dst->isInstr())
- return;
-
- const HexagonInstrInfo *QII = static_cast<const HexagonInstrInfo *>(getInstrInfo());
-
- // Instructions with .new operands have zero latency.
- if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
- isBestZeroLatency(Src, Dst, QII)) {
- Dep.setLatency(0);
- return;
+ if (SrcBest != nullptr) {
+ if (!hasV60TOps())
+ changeLatency(SrcBest, Dst, 1);
+ else
+ restoreLatency(SrcBest, Dst);
}
-
- if (!hasV60TOps())
- return;
-
- // Don't adjust the latency of post-increment part of the instruction.
- if (QII->isPostIncrement(*SrcInst) && Dep.isAssignedRegDep()) {
- if (SrcInst->mayStore())
- return;
- if (Dep.getReg() != SrcInst->getOperand(0).getReg())
- return;
- } else if (QII->isPostIncrement(*DstInst) && Dep.getKind() == SDep::Anti) {
- if (DstInst->mayStore())
- return;
- if (Dep.getReg() != DstInst->getOperand(0).getReg())
- return;
- } else if (QII->isPostIncrement(*DstInst) && DstInst->mayStore() &&
- Dep.isAssignedRegDep()) {
- MachineOperand &Op = DstInst->getOperand(DstInst->getNumOperands() - 1);
- if (Op.isReg() && Dep.getReg() != Op.getReg())
- return;
- }
-
- // Check if we need to change any the latency values when Phis are added.
- if (useBSBScheduling() && SrcInst->isPHI()) {
- changePhiLatency(*SrcInst, Dst, Dep);
- return;
+ if (DstBest != nullptr) {
+ if (!hasV60TOps())
+ changeLatency(Src, DstBest, 1);
+ else
+ restoreLatency(Src, DstBest);
}
- // If it's a REG_SEQUENCE, use its destination instruction to determine
- // the correct latency.
- if (DstInst->isRegSequence() && Dst->NumSuccs == 1)
- DstInst = Dst->Succs[0].getSUnit()->getInstr();
-
- // Try to schedule uses near definitions to generate .cur.
- if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
- isBestZeroLatency(Src, Dst, QII)) {
- Dep.setLatency(0);
- return;
+ // Attempt to find another opprotunity for zero latency in a different
+ // dependence.
+ if (SrcBest && DstBest)
+ // If there is an edge from SrcBest to DstBst, then try to change that
+ // to 0 now.
+ changeLatency(SrcBest, DstBest, 0);
+ else if (DstBest) {
+ // Check if the previous best destination instruction has a new zero
+ // latency dependence opportunity.
+ ExclSrc.insert(Src);
+ for (auto &I : DstBest->Preds)
+ if (ExclSrc.count(I.getSUnit()) == 0 &&
+ isBestZeroLatency(I.getSUnit(), DstBest, TII, ExclSrc, ExclDst))
+ changeLatency(I.getSUnit(), DstBest, 0);
+ } else if (SrcBest) {
+ // Check if previous best source instruction has a new zero latency
+ // dependence opportunity.
+ ExclDst.insert(Dst);
+ for (auto &I : SrcBest->Succs)
+ if (ExclDst.count(I.getSUnit()) == 0 &&
+ isBestZeroLatency(SrcBest, I.getSUnit(), TII, ExclSrc, ExclDst))
+ changeLatency(SrcBest, I.getSUnit(), 0);
}
- updateLatency(*SrcInst, *DstInst, Dep);
+ return true;
}
unsigned HexagonSubtarget::getL1CacheLineSize() const {
@@ -392,3 +444,7 @@ unsigned HexagonSubtarget::getL1PrefetchDistance() const {
return 32;
}
+bool HexagonSubtarget::enableSubRegLiveness() const {
+ return EnableSubregLiveness;
+}
+
diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h
index 6a3e7f13be4c..4379efa79c9c 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/lib/Target/Hexagon/HexagonSubtarget.h
@@ -104,6 +104,7 @@ public:
bool useHVXDblOps() const { return UseHVXOps && UseHVXDblOps; }
bool useHVXSglOps() const { return UseHVXOps && !UseHVXDblOps; }
bool useLongCalls() const { return UseLongCalls; }
+ bool usePredicatedCalls() const;
bool useBSBScheduling() const { return UseBSBScheduling; }
bool enableMachineScheduler() const override;
@@ -146,11 +147,10 @@ private:
// Helper function responsible for increasing the latency only.
void updateLatency(MachineInstr &SrcInst, MachineInstr &DstInst, SDep &Dep)
const;
- void changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps, SUnit *Dst,
- unsigned Lat) const;
- bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII)
- const;
- void changePhiLatency(MachineInstr &SrcInst, SUnit *Dst, SDep &Dep) const;
+ void restoreLatency(SUnit *Src, SUnit *Dst) const;
+ void changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat) const;
+ bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII,
+ SmallSet<SUnit*, 4> &ExclSrc, SmallSet<SUnit*, 4> &ExclDst) const;
};
} // end namespace llvm
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index bf1dce67bd0a..c21b6e2515d3 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -334,7 +334,7 @@ bool HexagonPacketizerList::isNewifiable(const MachineInstr &MI,
// Vector stores can be predicated, and can be new-value stores, but
// they cannot be predicated on a .new predicate value.
if (NewRC == &Hexagon::PredRegsRegClass)
- if (HII->isV60VectorInstruction(MI) && MI.mayStore())
+ if (HII->isHVXVec(MI) && MI.mayStore())
return false;
return HII->isCondInst(MI) || HII->isJumpR(MI) || MI.isReturn() ||
HII->mayBeNewStore(MI);
@@ -377,9 +377,9 @@ void HexagonPacketizerList::cleanUpDotCur() {
bool HexagonPacketizerList::canPromoteToDotCur(const MachineInstr &MI,
const SUnit *PacketSU, unsigned DepReg, MachineBasicBlock::iterator &MII,
const TargetRegisterClass *RC) {
- if (!HII->isV60VectorInstruction(MI))
+ if (!HII->isHVXVec(MI))
return false;
- if (!HII->isV60VectorInstruction(*MII))
+ if (!HII->isHVXVec(*MII))
return false;
// Already a dot new instruction.
@@ -1365,7 +1365,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// Data dpendence ok if we have load.cur.
if (DepType == SDep::Data && HII->isDotCurInst(J)) {
- if (HII->isV60VectorInstruction(I))
+ if (HII->isHVXVec(I))
continue;
}
@@ -1374,6 +1374,8 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
if (canPromoteToDotNew(I, SUJ, DepReg, II, RC)) {
if (promoteToDotNew(I, DepType, II, RC)) {
PromotedToDotNew = true;
+ if (cannotCoexist(I, J))
+ FoundSequentialDependence = true;
continue;
}
}
@@ -1418,26 +1420,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
DepType != SDep::Output)
continue;
- // Ignore output dependences due to superregs. We can write to two
- // different subregisters of R1:0 for instance in the same cycle.
-
- // If neither I nor J defines DepReg, then this is a superfluous output
- // dependence. The dependence must be of the form:
- // R0 = ...
- // R1 = ...
- // and there is an output dependence between the two instructions with
- // DepReg = D0.
- // We want to ignore these dependences. Ideally, the dependence
- // constructor should annotate such dependences. We can then avoid this
- // relatively expensive check.
- //
if (DepType == SDep::Output) {
- // DepReg is the register that's responsible for the dependence.
- unsigned DepReg = SUJ->Succs[i].getReg();
-
- // Check if I and J really defines DepReg.
- if (!I.definesRegister(DepReg) && !J.definesRegister(DepReg))
- continue;
FoundSequentialDependence = true;
break;
}
@@ -1553,10 +1536,9 @@ bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
MachineInstr &I = *SUI->getInstr();
MachineInstr &J = *SUJ->getInstr();
- if (cannotCoexist(I, J))
- return false;
+ bool Coexist = !cannotCoexist(I, J);
- if (!Dependence)
+ if (Coexist && !Dependence)
return true;
// Check if the instruction was promoted to a dot-new. If so, demote it
@@ -1659,21 +1641,6 @@ bool HexagonPacketizerList::shouldAddToPacket(const MachineInstr &MI) {
}
-// Return true when ConsMI uses a register defined by ProdMI.
-static bool isDependent(const MachineInstr &ProdMI,
- const MachineInstr &ConsMI) {
- if (!ProdMI.getOperand(0).isReg())
- return false;
- unsigned DstReg = ProdMI.getOperand(0).getReg();
-
- for (auto &Op : ConsMI.operands())
- if (Op.isReg() && Op.isUse() && Op.getReg() == DstReg)
- // The MIs depend on each other.
- return true;
-
- return false;
-}
-
// V60 forward scheduling.
bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
// If the packet already stalls, then ignore the stall from a subsequent
@@ -1695,40 +1662,48 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
return false;
}
- // Check for stall between two vector instructions.
- if (HII->isV60VectorInstruction(I)) {
- for (auto J : OldPacketMIs) {
- if (!HII->isV60VectorInstruction(*J))
- continue;
- if (isDependent(*J, I) && !HII->isVecUsableNextPacket(*J, I))
- return true;
- }
+ SUnit *SUI = MIToSUnit[const_cast<MachineInstr *>(&I)];
- return false;
- }
+ // Check if the latency is 0 between this instruction and any instruction
+ // in the current packet. If so, we disregard any potential stalls due to
+ // the instructions in the previous packet. Most of the instruction pairs
+ // that can go together in the same packet have 0 latency between them.
+ // Only exceptions are newValueJumps as they're generated much later and
+ // the latencies can't be changed at that point. Another is .cur
+ // instructions if its consumer has a 0 latency successor (such as .new).
+ // In this case, the latency between .cur and the consumer stays non-zero
+ // even though we can have both .cur and .new in the same packet. Changing
+ // the latency to 0 is not an option as it causes software pipeliner to
+ // not pipeline in some cases.
+
+ // For Example:
+ // {
+ // I1: v6.cur = vmem(r0++#1)
+ // I2: v7 = valign(v6,v4,r2)
+ // I3: vmem(r5++#1) = v7.new
+ // }
+ // Here I2 and I3 has 0 cycle latency, but I1 and I2 has 2.
- // Check for stall between two scalar instructions. First, check that
- // there is no definition of a use in the current packet, because it
- // may be a candidate for .new.
- for (auto J : CurrentPacketMIs)
- if (!HII->isV60VectorInstruction(*J) && isDependent(*J, I))
- return false;
+ for (auto J : CurrentPacketMIs) {
+ SUnit *SUJ = MIToSUnit[J];
+ for (auto &Pred : SUI->Preds)
+ if (Pred.getSUnit() == SUJ &&
+ (Pred.getLatency() == 0 || HII->isNewValueJump(I) ||
+ HII->isToBeScheduledASAP(*J, I)))
+ return false;
+ }
- // Check for stall between I and instructions in the previous packet.
- if (MF.getSubtarget<HexagonSubtarget>().useBSBScheduling()) {
- for (auto J : OldPacketMIs) {
- if (HII->isV60VectorInstruction(*J))
- continue;
- if (!HII->isLateInstrFeedsEarlyInstr(*J, I))
- continue;
- if (isDependent(*J, I) && !HII->canExecuteInBundle(*J, I))
+ // Check if the latency is greater than one between this instruction and any
+ // instruction in the previous packet.
+ for (auto J : OldPacketMIs) {
+ SUnit *SUJ = MIToSUnit[J];
+ for (auto &Pred : SUI->Preds)
+ if (Pred.getSUnit() == SUJ && Pred.getLatency() > 1)
return true;
- }
}
// Check if the latency is greater than one between this instruction and any
// instruction in the previous packet.
- SUnit *SUI = MIToSUnit[const_cast<MachineInstr *>(&I)];
for (auto J : OldPacketMIs) {
SUnit *SUJ = MIToSUnit[J];
for (auto &Pred : SUI->Preds)
@@ -1739,7 +1714,6 @@ bool HexagonPacketizerList::producesStall(const MachineInstr &I) {
return false;
}
-
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index adb546dc2140..d8009c5da08e 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -29,7 +29,7 @@ namespace llvm {
///
namespace HexagonII {
unsigned const TypeCVI_FIRST = TypeCVI_HIST;
- unsigned const TypeCVI_LAST = TypeCVI_VX_DV;
+ unsigned const TypeCVI_LAST = TypeCVI_VX_LATE;
enum SubTarget {
HasV4SubT = 0x3f,
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index dfb5f4cc8260..70410ff03a64 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -788,14 +788,6 @@ HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
if (HexagonMCInstrInfo::isSubInstruction(MI) ||
llvm::HexagonMCInstrInfo::getType(MCII, MI) == HexagonII::TypeCJ)
return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg);
- switch(MI.getOpcode()){
- case Hexagon::A2_tfrrcr:
- case Hexagon::A2_tfrcrr:
- if(Reg == Hexagon::M0)
- Reg = Hexagon::C6;
- if(Reg == Hexagon::M1)
- Reg = Hexagon::C7;
- }
return MCT.getRegisterInfo()->getEncodingValue(Reg);
}
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
index a5afa1daeb9e..564d43b45cb8 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp
@@ -102,12 +102,13 @@ void HexagonCVIResource::SetupTUL(TypeUnitsAndLanes *TUL, StringRef CPU) {
UnitsAndLanes(CVI_XLANE | CVI_SHIFT | CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VA_DV] = UnitsAndLanes(CVI_XLANE | CVI_MPY0, 2);
(*TUL)[HexagonII::TypeCVI_VX] = UnitsAndLanes(CVI_MPY0 | CVI_MPY1, 1);
+ (*TUL)[HexagonII::TypeCVI_VX_LATE] = UnitsAndLanes(CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VX_DV] = UnitsAndLanes(CVI_MPY0, 2);
(*TUL)[HexagonII::TypeCVI_VP] = UnitsAndLanes(CVI_XLANE, 1);
(*TUL)[HexagonII::TypeCVI_VP_VS] = UnitsAndLanes(CVI_XLANE, 2);
(*TUL)[HexagonII::TypeCVI_VS] = UnitsAndLanes(CVI_SHIFT, 1);
(*TUL)[HexagonII::TypeCVI_VINLANESAT] =
- (CPU == "hexagonv60" || CPU == "hexagonv61" || CPU == "hexagonv61v1")
+ (CPU == "hexagonv60")
? UnitsAndLanes(CVI_SHIFT, 1)
: UnitsAndLanes(CVI_XLANE | CVI_SHIFT | CVI_MPY0 | CVI_MPY1, 1);
(*TUL)[HexagonII::TypeCVI_VM_LD] =
@@ -291,10 +292,8 @@ bool HexagonShuffler::check() {
break;
case HexagonII::TypeNCJ:
++memory; // NV insns are memory-like.
- if (HexagonMCInstrInfo::getDesc(MCII, ID).isBranch()) {
- ++jumps, ++jump1;
- foundBranches.push_back(ISJ);
- }
+ ++jumps, ++jump1;
+ foundBranches.push_back(ISJ);
break;
case HexagonII::TypeV2LDST:
if (HexagonMCInstrInfo::getDesc(MCII, ID).mayLoad()) {
diff --git a/lib/Target/Hexagon/RDFLiveness.cpp b/lib/Target/Hexagon/RDFLiveness.cpp
index 726b7af73b0a..9d8a3881797b 100644
--- a/lib/Target/Hexagon/RDFLiveness.cpp
+++ b/lib/Target/Hexagon/RDFLiveness.cpp
@@ -497,26 +497,33 @@ void Liveness::computePhiInfo() {
// = R1:0 u6 Not reached by d1 (covered collectively
// by d3 and d5), but following reached
// defs and uses from d1 will lead here.
- auto InPhiDefs = [&PhiDefs] (NodeAddr<DefNode*> DA) -> bool {
- return PhiDefs.count(DA.Id);
- };
for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
// For each reached register UI->first, there is a set UI->second, of
// uses of it. For each such use, check if it is reached by this phi,
// i.e. check if the set of its reaching uses intersects the set of
// this phi's defs.
- NodeRefSet &Uses = UI->second;
- for (auto I = Uses.begin(), E = Uses.end(); I != E; ) {
- auto UA = DFG.addr<UseNode*>(I->first);
+ NodeRefSet Uses = UI->second;
+ UI->second.clear();
+ for (std::pair<NodeId,LaneBitmask> I : Uses) {
+ auto UA = DFG.addr<UseNode*>(I.first);
// Undef flag is checked above.
assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
- RegisterRef R(UI->first, I->second);
- NodeList RDs = getAllReachingDefs(R, UA);
- // If none of the reaching defs of R are from this phi, remove this
- // use of R.
- I = any_of(RDs, InPhiDefs) ? std::next(I) : Uses.erase(I);
+ RegisterRef R(UI->first, I.second);
+ // Calculate the exposed part of the reached use.
+ RegisterAggr Covered(PRI);
+ for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
+ if (PhiDefs.count(DA.Id))
+ break;
+ Covered.insert(DA.Addr->getRegRef(DFG));
+ }
+ if (RegisterRef RC = Covered.clearIn(R)) {
+ // We are updating the map for register UI->first, so we need
+ // to map RC to be expressed in terms of that register.
+ RegisterRef S = PRI.mapTo(RC, UI->first);
+ UI->second.insert({I.first, S.Mask});
+ }
}
- UI = Uses.empty() ? RealUses.erase(UI) : std::next(UI);
+ UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
}
// If this phi reaches some "real" uses, add it to the queue for upward
@@ -626,7 +633,7 @@ void Liveness::computePhiInfo() {
const RegisterAggr &DRs = PhiDRs.at(P.first);
if (!DRs.hasAliasOf(R))
continue;
- R = DRs.intersectWith(R);
+ R = PRI.mapTo(DRs.intersectWith(R), T.first);
for (std::pair<NodeId,LaneBitmask> V : T.second) {
LaneBitmask M = R.Mask & V.second;
if (M.none())
diff --git a/lib/Target/Hexagon/RDFRegisters.cpp b/lib/Target/Hexagon/RDFRegisters.cpp
index 4224ded3418b..2aabf4ee1a38 100644
--- a/lib/Target/Hexagon/RDFRegisters.cpp
+++ b/lib/Target/Hexagon/RDFRegisters.cpp
@@ -212,6 +212,21 @@ bool PhysicalRegisterInfo::aliasMM(RegisterRef RM, RegisterRef RN) const {
return false;
}
+RegisterRef PhysicalRegisterInfo::mapTo(RegisterRef RR, unsigned R) const {
+ if (RR.Reg == R)
+ return RR;
+ if (unsigned Idx = TRI.getSubRegIndex(R, RR.Reg))
+ return RegisterRef(R, TRI.composeSubRegIndexLaneMask(Idx, RR.Mask));
+ if (unsigned Idx = TRI.getSubRegIndex(RR.Reg, R)) {
+ const RegInfo &RI = RegInfos[R];
+ LaneBitmask RCM = RI.RegClass ? RI.RegClass->LaneMask
+ : LaneBitmask::getAll();
+ LaneBitmask M = TRI.reverseComposeSubRegIndexLaneMask(Idx, RR.Mask);
+ return RegisterRef(R, M & RCM);
+ }
+ llvm_unreachable("Invalid arguments: unrelated registers?");
+}
+
bool RegisterAggr::hasAliasOf(RegisterRef RR) const {
if (PhysicalRegisterInfo::isRegMaskId(RR.Reg))
diff --git a/lib/Target/Hexagon/RDFRegisters.h b/lib/Target/Hexagon/RDFRegisters.h
index 314d8b5666d7..09b733ce616b 100644
--- a/lib/Target/Hexagon/RDFRegisters.h
+++ b/lib/Target/Hexagon/RDFRegisters.h
@@ -112,6 +112,7 @@ namespace rdf {
const BitVector &getMaskUnits(RegisterId MaskId) const {
return MaskInfos[TargetRegisterInfo::stackSlot2Index(MaskId)].Units;
}
+ RegisterRef mapTo(RegisterRef RR, unsigned R) const;
const TargetRegisterInfo &getTRI() const { return TRI; }
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 134f7ac3aea3..9cdbf510737f 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -81,7 +81,7 @@ bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
AsmPrinter::runOnMachineFunction(MF);
- EmitXRayTable();
+ emitXRayTable();
return true;
}
@@ -1148,39 +1148,6 @@ void MipsAsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind) {
recordSled(CurSled, MI, Kind);
}
-void MipsAsmPrinter::EmitXRayTable() {
- if (Sleds.empty())
- return;
- if (Subtarget->isTargetELF()) {
- auto PrevSection = OutStreamer->getCurrentSectionOnly();
- auto Fn = MF->getFunction();
- MCSection *Section;
-
- if (Fn->hasComdat())
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
- Fn->getComdat()->getName());
- else
- Section =
- OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
- ELF::SHF_ALLOC, 0, CurrentFnSym->getName());
-
- OutStreamer->SwitchSection(Section);
- for (const auto &Sled : Sleds) {
- OutStreamer->EmitSymbolValue(Sled.Sled, Subtarget->isGP64bit() ? 8 : 4);
- OutStreamer->EmitSymbolValue(CurrentFnSym, Subtarget->isGP64bit() ? 8 : 4);
- auto Kind = static_cast<uint8_t>(Sled.Kind);
- OutStreamer->EmitBytes(
- StringRef(reinterpret_cast<const char *>(&Kind), 1));
- OutStreamer->EmitBytes(
- StringRef(reinterpret_cast<const char *>(&Sled.AlwaysInstrument), 1));
- OutStreamer->EmitZeros(Subtarget->isGP64bit() ? 14 : 6);
- }
- OutStreamer->SwitchSection(PrevSection);
- }
- Sleds.clear();
-}
-
void MipsAsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI) {
EmitSled(MI, SledKind::FUNCTION_ENTER);
}
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 4d06912054a2..61fdda8aa109 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1,4661 +1,4662 @@
-//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that NVPTX uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/NVPTXBaseInfo.h"
-#include "NVPTX.h"
-#include "NVPTXISelLowering.h"
-#include "NVPTXSection.h"
-#include "NVPTXSubtarget.h"
-#include "NVPTXTargetMachine.h"
-#include "NVPTXTargetObjectFile.h"
-#include "NVPTXUtilities.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/CodeGen/Analysis.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/MachineValueType.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/IR/Argument.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Value.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CodeGen.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetCallingConv.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include <algorithm>
-#include <cassert>
-#include <cstdint>
-#include <iterator>
-#include <sstream>
-#include <string>
-#include <utility>
-#include <vector>
-
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "nvptx-lower"
-
-using namespace llvm;
-
-static unsigned int uniqueCallSite = 0;
-
-static cl::opt<bool> sched4reg(
- "nvptx-sched4reg",
- cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
-
-static cl::opt<unsigned>
-FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
- " 1: do it 2: do it aggressively"),
- cl::init(2));
-
-static cl::opt<int> UsePrecDivF32(
- "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
- " IEEE Compliant F32 div.rnd if available."),
- cl::init(2));
-
-static cl::opt<bool> UsePrecSqrtF32(
- "nvptx-prec-sqrtf32", cl::Hidden,
- cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
- cl::init(true));
-
-static cl::opt<bool> FtzEnabled(
- "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
- cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
- cl::init(false));
-
-int NVPTXTargetLowering::getDivF32Level() const {
- if (UsePrecDivF32.getNumOccurrences() > 0) {
- // If nvptx-prec-div32=N is used on the command-line, always honor it
- return UsePrecDivF32;
- } else {
- // Otherwise, use div.approx if fast math is enabled
- if (getTargetMachine().Options.UnsafeFPMath)
- return 0;
- else
- return 2;
- }
-}
-
-bool NVPTXTargetLowering::usePrecSqrtF32() const {
- if (UsePrecSqrtF32.getNumOccurrences() > 0) {
- // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
- return UsePrecSqrtF32;
- } else {
- // Otherwise, use sqrt.approx if fast math is enabled
- return !getTargetMachine().Options.UnsafeFPMath;
- }
-}
-
-bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
- // TODO: Get rid of this flag; there can be only one way to do this.
- if (FtzEnabled.getNumOccurrences() > 0) {
- // If nvptx-f32ftz is used on the command-line, always honor it
- return FtzEnabled;
- } else {
- const Function *F = MF.getFunction();
- // Otherwise, check for an nvptx-f32ftz attribute on the function
- if (F->hasFnAttribute("nvptx-f32ftz"))
- return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
- else
- return false;
- }
-}
-
-static bool IsPTXVectorType(MVT VT) {
- switch (VT.SimpleTy) {
- default:
- return false;
- case MVT::v2i1:
- case MVT::v4i1:
- case MVT::v2i8:
- case MVT::v4i8:
- case MVT::v2i16:
- case MVT::v4i16:
- case MVT::v2i32:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v4f16:
- case MVT::v8f16: // <4 x f16x2>
- case MVT::v2f32:
- case MVT::v4f32:
- case MVT::v2f64:
- return true;
- }
-}
-
-/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
-/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
-/// into their primitive components.
-/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
-/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
-/// LowerCall, and LowerReturn.
-static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
- Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
- SmallVectorImpl<uint64_t> *Offsets = nullptr,
- uint64_t StartingOffset = 0) {
- SmallVector<EVT, 16> TempVTs;
- SmallVector<uint64_t, 16> TempOffsets;
-
- ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
- for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
- EVT VT = TempVTs[i];
- uint64_t Off = TempOffsets[i];
- // Split vectors into individual elements, except for v2f16, which
- // we will pass as a single scalar.
- if (VT.isVector()) {
- unsigned NumElts = VT.getVectorNumElements();
- EVT EltVT = VT.getVectorElementType();
- // Vectors with an even number of f16 elements will be passed to
- // us as an array of v2f16 elements. We must match this so we
- // stay in sync with Ins/Outs.
- if (EltVT == MVT::f16 && NumElts % 2 == 0) {
- EltVT = MVT::v2f16;
- NumElts /= 2;
- }
- for (unsigned j = 0; j != NumElts; ++j) {
- ValueVTs.push_back(EltVT);
- if (Offsets)
- Offsets->push_back(Off + j * EltVT.getStoreSize());
- }
- } else {
- ValueVTs.push_back(VT);
- if (Offsets)
- Offsets->push_back(Off);
- }
- }
-}
-
-// Check whether we can merge loads/stores of some of the pieces of a
-// flattened function parameter or return value into a single vector
-// load/store.
-//
-// The flattened parameter is represented as a list of EVTs and
-// offsets, and the whole structure is aligned to ParamAlignment. This
-// function determines whether we can load/store pieces of the
-// parameter starting at index Idx using a single vectorized op of
-// size AccessSize. If so, it returns the number of param pieces
-// covered by the vector op. Otherwise, it returns 1.
-static unsigned CanMergeParamLoadStoresStartingAt(
- unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
- const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
- assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
-
- // Can't vectorize if param alignment is not sufficient.
- if (AccessSize > ParamAlignment)
- return 1;
- // Can't vectorize if offset is not aligned.
- if (Offsets[Idx] & (AccessSize - 1))
- return 1;
-
- EVT EltVT = ValueVTs[Idx];
- unsigned EltSize = EltVT.getStoreSize();
-
- // Element is too large to vectorize.
- if (EltSize >= AccessSize)
- return 1;
-
- unsigned NumElts = AccessSize / EltSize;
- // Can't vectorize if AccessBytes if not a multiple of EltSize.
- if (AccessSize != EltSize * NumElts)
- return 1;
-
- // We don't have enough elements to vectorize.
- if (Idx + NumElts > ValueVTs.size())
- return 1;
-
- // PTX ISA can only deal with 2- and 4-element vector ops.
- if (NumElts != 4 && NumElts != 2)
- return 1;
-
- for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
- // Types do not match.
- if (ValueVTs[j] != EltVT)
- return 1;
-
- // Elements are not contiguous.
- if (Offsets[j] - Offsets[j - 1] != EltSize)
- return 1;
- }
- // OK. We can vectorize ValueVTs[i..i+NumElts)
- return NumElts;
-}
-
-// Flags for tracking per-element vectorization state of loads/stores
-// of a flattened function parameter or return value.
-enum ParamVectorizationFlags {
- PVF_INNER = 0x0, // Middle elements of a vector.
- PVF_FIRST = 0x1, // First element of the vector.
- PVF_LAST = 0x2, // Last element of the vector.
- // Scalar is effectively a 1-element vector.
- PVF_SCALAR = PVF_FIRST | PVF_LAST
-};
-
-// Computes whether and how we can vectorize the loads/stores of a
-// flattened function parameter or return value.
-//
-// The flattened parameter is represented as the list of ValueVTs and
-// Offsets, and is aligned to ParamAlignment bytes. We return a vector
-// of the same size as ValueVTs indicating how each piece should be
-// loaded/stored (i.e. as a scalar, or as part of a vector
-// load/store).
-static SmallVector<ParamVectorizationFlags, 16>
-VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
- const SmallVectorImpl<uint64_t> &Offsets,
- unsigned ParamAlignment) {
- // Set vector size to match ValueVTs and mark all elements as
- // scalars by default.
- SmallVector<ParamVectorizationFlags, 16> VectorInfo;
- VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);
-
- // Check what we can vectorize using 128/64/32-bit accesses.
- for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
- // Skip elements we've already processed.
- assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
- for (unsigned AccessSize : {16, 8, 4, 2}) {
- unsigned NumElts = CanMergeParamLoadStoresStartingAt(
- I, AccessSize, ValueVTs, Offsets, ParamAlignment);
- // Mark vectorized elements.
- switch (NumElts) {
- default:
- llvm_unreachable("Unexpected return value");
- case 1:
- // Can't vectorize using this size, try next smaller size.
- continue;
- case 2:
- assert(I + 1 < E && "Not enough elements.");
- VectorInfo[I] = PVF_FIRST;
- VectorInfo[I + 1] = PVF_LAST;
- I += 1;
- break;
- case 4:
- assert(I + 3 < E && "Not enough elements.");
- VectorInfo[I] = PVF_FIRST;
- VectorInfo[I + 1] = PVF_INNER;
- VectorInfo[I + 2] = PVF_INNER;
- VectorInfo[I + 3] = PVF_LAST;
- I += 3;
- break;
- }
- // Break out of the inner loop because we've already succeeded
- // using largest possible AccessSize.
- break;
- }
- }
- return VectorInfo;
-}
-
-// NVPTXTargetLowering Constructor.
-NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
- const NVPTXSubtarget &STI)
- : TargetLowering(TM), nvTM(&TM), STI(STI) {
- // always lower memset, memcpy, and memmove intrinsics to load/store
- // instructions, rather
- // then generating calls to memset, mempcy or memmove.
- MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
- MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
- MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
-
- setBooleanContents(ZeroOrNegativeOneBooleanContent);
- setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
-
- // Jump is Expensive. Don't create extra control flow for 'and', 'or'
- // condition branches.
- setJumpIsExpensive(true);
-
- // Wide divides are _very_ slow. Try to reduce the width of the divide if
- // possible.
- addBypassSlowDiv(64, 32);
-
- // By default, use the Source scheduling
- if (sched4reg)
- setSchedulingPreference(Sched::RegPressure);
- else
- setSchedulingPreference(Sched::Source);
-
- auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
- LegalizeAction NoF16Action) {
- setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
- };
-
- addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
- addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
- addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
- addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
- addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
- addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
- addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
- addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);
-
- // Conversion to/from FP16/FP16x2 is always legal.
- setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
- setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
-
- setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
- setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
-
- // Operations not directly supported by NVPTX.
- setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
- setOperationAction(ISD::BR_CC, MVT::f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);
- setOperationAction(ISD::BR_CC, MVT::f32, Expand);
- setOperationAction(ISD::BR_CC, MVT::f64, Expand);
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::BR_CC, MVT::i8, Expand);
- setOperationAction(ISD::BR_CC, MVT::i16, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
- setOperationAction(ISD::BR_CC, MVT::i64, Expand);
- // Some SIGN_EXTEND_INREG can be done using cvt instruction.
- // For others we will expand to a SHL/SRA pair.
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
- setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
-
- setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
- setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
-
- if (STI.hasROT64()) {
- setOperationAction(ISD::ROTL, MVT::i64, Legal);
- setOperationAction(ISD::ROTR, MVT::i64, Legal);
- } else {
- setOperationAction(ISD::ROTL, MVT::i64, Expand);
- setOperationAction(ISD::ROTR, MVT::i64, Expand);
- }
- if (STI.hasROT32()) {
- setOperationAction(ISD::ROTL, MVT::i32, Legal);
- setOperationAction(ISD::ROTR, MVT::i32, Legal);
- } else {
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::ROTR, MVT::i32, Expand);
- }
-
- setOperationAction(ISD::ROTL, MVT::i16, Expand);
- setOperationAction(ISD::ROTR, MVT::i16, Expand);
- setOperationAction(ISD::ROTL, MVT::i8, Expand);
- setOperationAction(ISD::ROTR, MVT::i8, Expand);
- setOperationAction(ISD::BSWAP, MVT::i16, Expand);
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);
- setOperationAction(ISD::BSWAP, MVT::i64, Expand);
-
- // Indirect branch is not supported.
- // This also disables Jump Table creation.
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BRIND, MVT::Other, Expand);
-
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
-
- // We want to legalize constant related memmove and memcopy
- // intrinsics.
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
-
- // Turn FP extload into load/fpextend
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
- // Turn FP truncstore into trunc + store.
- // FIXME: vector types should also be expanded
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
- // PTX does not support load / store predicate registers
- setOperationAction(ISD::LOAD, MVT::i1, Custom);
- setOperationAction(ISD::STORE, MVT::i1, Custom);
-
- for (MVT VT : MVT::integer_valuetypes()) {
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
- setTruncStoreAction(VT, MVT::i1, Expand);
- }
-
- // This is legal in NVPTX
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
-
- // TRAP can be lowered to PTX trap
- setOperationAction(ISD::TRAP, MVT::Other, Legal);
-
- setOperationAction(ISD::ADDC, MVT::i64, Expand);
- setOperationAction(ISD::ADDE, MVT::i64, Expand);
-
- // Register custom handling for vector loads/stores
- for (MVT VT : MVT::vector_valuetypes()) {
- if (IsPTXVectorType(VT)) {
- setOperationAction(ISD::LOAD, VT, Custom);
- setOperationAction(ISD::STORE, VT, Custom);
- setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
- }
- }
-
- // Custom handling for i8 intrinsics
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
-
- for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
- setOperationAction(ISD::SMIN, Ty, Legal);
- setOperationAction(ISD::SMAX, Ty, Legal);
- setOperationAction(ISD::UMIN, Ty, Legal);
- setOperationAction(ISD::UMAX, Ty, Legal);
-
- setOperationAction(ISD::CTPOP, Ty, Legal);
- setOperationAction(ISD::CTLZ, Ty, Legal);
- }
-
- setOperationAction(ISD::CTTZ, MVT::i16, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ, MVT::i64, Expand);
-
- // PTX does not directly support SELP of i1, so promote to i32 first
- setOperationAction(ISD::SELECT, MVT::i1, Custom);
-
- // PTX cannot multiply two i64s in a single instruction.
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-
- // We have some custom DAG combine patterns for these nodes
- setTargetDAGCombine(ISD::ADD);
- setTargetDAGCombine(ISD::AND);
- setTargetDAGCombine(ISD::FADD);
- setTargetDAGCombine(ISD::MUL);
- setTargetDAGCombine(ISD::SHL);
- setTargetDAGCombine(ISD::SREM);
- setTargetDAGCombine(ISD::UREM);
-
- // setcc for f16x2 needs special handling to prevent legalizer's
- // attempt to scalarize it due to v2i1 not being legal.
- if (STI.allowFP16Math())
- setTargetDAGCombine(ISD::SETCC);
-
- // Promote fp16 arithmetic if fp16 hardware isn't available or the
- // user passed --nvptx-no-fp16-math. The flag is useful because,
- // although sm_53+ GPUs have some sort of FP16 support in
- // hardware, only sm_53 and sm_60 have full implementation. Others
- // only have token amount of hardware and are likely to run faster
- // by using fp32 units instead.
- for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
- setFP16OperationAction(Op, MVT::f16, Legal, Promote);
- setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
- }
-
- // There's no neg.f16 instruction. Expand to (0-x).
- setOperationAction(ISD::FNEG, MVT::f16, Expand);
- setOperationAction(ISD::FNEG, MVT::v2f16, Expand);
-
- // (would be) Library functions.
-
- // These map to conversion instructions for scalar FP types.
- for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
- ISD::FROUND, ISD::FTRUNC}) {
- setOperationAction(Op, MVT::f16, Legal);
- setOperationAction(Op, MVT::f32, Legal);
- setOperationAction(Op, MVT::f64, Legal);
- setOperationAction(Op, MVT::v2f16, Expand);
- }
-
- // 'Expand' implements FCOPYSIGN without calling an external library.
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-
- // These map to corresponding instructions for f32/f64. f16 must be
- // promoted to f32. v2f16 is expanded to f16, which is then promoted
- // to f32.
- for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
- ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
- setOperationAction(Op, MVT::f16, Promote);
- setOperationAction(Op, MVT::f32, Legal);
- setOperationAction(Op, MVT::f64, Legal);
- setOperationAction(Op, MVT::v2f16, Expand);
- }
- setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
- setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
- setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);
-
- // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
- // No FPOW or FREM in PTX.
-
- // Now deduce the information based on the above mentioned
- // actions
- computeRegisterProperties(STI.getRegisterInfo());
-}
-
-const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch ((NVPTXISD::NodeType)Opcode) {
- case NVPTXISD::FIRST_NUMBER:
- break;
- case NVPTXISD::CALL:
- return "NVPTXISD::CALL";
- case NVPTXISD::RET_FLAG:
- return "NVPTXISD::RET_FLAG";
- case NVPTXISD::LOAD_PARAM:
- return "NVPTXISD::LOAD_PARAM";
- case NVPTXISD::Wrapper:
- return "NVPTXISD::Wrapper";
- case NVPTXISD::DeclareParam:
- return "NVPTXISD::DeclareParam";
- case NVPTXISD::DeclareScalarParam:
- return "NVPTXISD::DeclareScalarParam";
- case NVPTXISD::DeclareRet:
- return "NVPTXISD::DeclareRet";
- case NVPTXISD::DeclareScalarRet:
- return "NVPTXISD::DeclareScalarRet";
- case NVPTXISD::DeclareRetParam:
- return "NVPTXISD::DeclareRetParam";
- case NVPTXISD::PrintCall:
- return "NVPTXISD::PrintCall";
- case NVPTXISD::PrintConvergentCall:
- return "NVPTXISD::PrintConvergentCall";
- case NVPTXISD::PrintCallUni:
- return "NVPTXISD::PrintCallUni";
- case NVPTXISD::PrintConvergentCallUni:
- return "NVPTXISD::PrintConvergentCallUni";
- case NVPTXISD::LoadParam:
- return "NVPTXISD::LoadParam";
- case NVPTXISD::LoadParamV2:
- return "NVPTXISD::LoadParamV2";
- case NVPTXISD::LoadParamV4:
- return "NVPTXISD::LoadParamV4";
- case NVPTXISD::StoreParam:
- return "NVPTXISD::StoreParam";
- case NVPTXISD::StoreParamV2:
- return "NVPTXISD::StoreParamV2";
- case NVPTXISD::StoreParamV4:
- return "NVPTXISD::StoreParamV4";
- case NVPTXISD::StoreParamS32:
- return "NVPTXISD::StoreParamS32";
- case NVPTXISD::StoreParamU32:
- return "NVPTXISD::StoreParamU32";
- case NVPTXISD::CallArgBegin:
- return "NVPTXISD::CallArgBegin";
- case NVPTXISD::CallArg:
- return "NVPTXISD::CallArg";
- case NVPTXISD::LastCallArg:
- return "NVPTXISD::LastCallArg";
- case NVPTXISD::CallArgEnd:
- return "NVPTXISD::CallArgEnd";
- case NVPTXISD::CallVoid:
- return "NVPTXISD::CallVoid";
- case NVPTXISD::CallVal:
- return "NVPTXISD::CallVal";
- case NVPTXISD::CallSymbol:
- return "NVPTXISD::CallSymbol";
- case NVPTXISD::Prototype:
- return "NVPTXISD::Prototype";
- case NVPTXISD::MoveParam:
- return "NVPTXISD::MoveParam";
- case NVPTXISD::StoreRetval:
- return "NVPTXISD::StoreRetval";
- case NVPTXISD::StoreRetvalV2:
- return "NVPTXISD::StoreRetvalV2";
- case NVPTXISD::StoreRetvalV4:
- return "NVPTXISD::StoreRetvalV4";
- case NVPTXISD::PseudoUseParam:
- return "NVPTXISD::PseudoUseParam";
- case NVPTXISD::RETURN:
- return "NVPTXISD::RETURN";
- case NVPTXISD::CallSeqBegin:
- return "NVPTXISD::CallSeqBegin";
- case NVPTXISD::CallSeqEnd:
- return "NVPTXISD::CallSeqEnd";
- case NVPTXISD::CallPrototype:
- return "NVPTXISD::CallPrototype";
- case NVPTXISD::LoadV2:
- return "NVPTXISD::LoadV2";
- case NVPTXISD::LoadV4:
- return "NVPTXISD::LoadV4";
- case NVPTXISD::LDGV2:
- return "NVPTXISD::LDGV2";
- case NVPTXISD::LDGV4:
- return "NVPTXISD::LDGV4";
- case NVPTXISD::LDUV2:
- return "NVPTXISD::LDUV2";
- case NVPTXISD::LDUV4:
- return "NVPTXISD::LDUV4";
- case NVPTXISD::StoreV2:
- return "NVPTXISD::StoreV2";
- case NVPTXISD::StoreV4:
- return "NVPTXISD::StoreV4";
- case NVPTXISD::FUN_SHFL_CLAMP:
- return "NVPTXISD::FUN_SHFL_CLAMP";
- case NVPTXISD::FUN_SHFR_CLAMP:
- return "NVPTXISD::FUN_SHFR_CLAMP";
- case NVPTXISD::IMAD:
- return "NVPTXISD::IMAD";
- case NVPTXISD::SETP_F16X2:
- return "NVPTXISD::SETP_F16X2";
- case NVPTXISD::Dummy:
- return "NVPTXISD::Dummy";
- case NVPTXISD::MUL_WIDE_SIGNED:
- return "NVPTXISD::MUL_WIDE_SIGNED";
- case NVPTXISD::MUL_WIDE_UNSIGNED:
- return "NVPTXISD::MUL_WIDE_UNSIGNED";
- case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
- case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
- case NVPTXISD::Tex1DFloatFloatLevel:
- return "NVPTXISD::Tex1DFloatFloatLevel";
- case NVPTXISD::Tex1DFloatFloatGrad:
- return "NVPTXISD::Tex1DFloatFloatGrad";
- case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
- case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
- case NVPTXISD::Tex1DS32FloatLevel:
- return "NVPTXISD::Tex1DS32FloatLevel";
- case NVPTXISD::Tex1DS32FloatGrad:
- return "NVPTXISD::Tex1DS32FloatGrad";
- case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
- case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
- case NVPTXISD::Tex1DU32FloatLevel:
- return "NVPTXISD::Tex1DU32FloatLevel";
- case NVPTXISD::Tex1DU32FloatGrad:
- return "NVPTXISD::Tex1DU32FloatGrad";
- case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
- case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
- case NVPTXISD::Tex1DArrayFloatFloatLevel:
- return "NVPTXISD::Tex1DArrayFloatFloatLevel";
- case NVPTXISD::Tex1DArrayFloatFloatGrad:
- return "NVPTXISD::Tex1DArrayFloatFloatGrad";
- case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
- case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
- case NVPTXISD::Tex1DArrayS32FloatLevel:
- return "NVPTXISD::Tex1DArrayS32FloatLevel";
- case NVPTXISD::Tex1DArrayS32FloatGrad:
- return "NVPTXISD::Tex1DArrayS32FloatGrad";
- case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
- case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
- case NVPTXISD::Tex1DArrayU32FloatLevel:
- return "NVPTXISD::Tex1DArrayU32FloatLevel";
- case NVPTXISD::Tex1DArrayU32FloatGrad:
- return "NVPTXISD::Tex1DArrayU32FloatGrad";
- case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
- case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
- case NVPTXISD::Tex2DFloatFloatLevel:
- return "NVPTXISD::Tex2DFloatFloatLevel";
- case NVPTXISD::Tex2DFloatFloatGrad:
- return "NVPTXISD::Tex2DFloatFloatGrad";
- case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
- case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
- case NVPTXISD::Tex2DS32FloatLevel:
- return "NVPTXISD::Tex2DS32FloatLevel";
- case NVPTXISD::Tex2DS32FloatGrad:
- return "NVPTXISD::Tex2DS32FloatGrad";
- case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
- case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
- case NVPTXISD::Tex2DU32FloatLevel:
- return "NVPTXISD::Tex2DU32FloatLevel";
- case NVPTXISD::Tex2DU32FloatGrad:
- return "NVPTXISD::Tex2DU32FloatGrad";
- case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
- case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
- case NVPTXISD::Tex2DArrayFloatFloatLevel:
- return "NVPTXISD::Tex2DArrayFloatFloatLevel";
- case NVPTXISD::Tex2DArrayFloatFloatGrad:
- return "NVPTXISD::Tex2DArrayFloatFloatGrad";
- case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
- case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
- case NVPTXISD::Tex2DArrayS32FloatLevel:
- return "NVPTXISD::Tex2DArrayS32FloatLevel";
- case NVPTXISD::Tex2DArrayS32FloatGrad:
- return "NVPTXISD::Tex2DArrayS32FloatGrad";
- case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
- case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
- case NVPTXISD::Tex2DArrayU32FloatLevel:
- return "NVPTXISD::Tex2DArrayU32FloatLevel";
- case NVPTXISD::Tex2DArrayU32FloatGrad:
- return "NVPTXISD::Tex2DArrayU32FloatGrad";
- case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
- case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
- case NVPTXISD::Tex3DFloatFloatLevel:
- return "NVPTXISD::Tex3DFloatFloatLevel";
- case NVPTXISD::Tex3DFloatFloatGrad:
- return "NVPTXISD::Tex3DFloatFloatGrad";
- case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
- case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
- case NVPTXISD::Tex3DS32FloatLevel:
- return "NVPTXISD::Tex3DS32FloatLevel";
- case NVPTXISD::Tex3DS32FloatGrad:
- return "NVPTXISD::Tex3DS32FloatGrad";
- case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
- case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
- case NVPTXISD::Tex3DU32FloatLevel:
- return "NVPTXISD::Tex3DU32FloatLevel";
- case NVPTXISD::Tex3DU32FloatGrad:
- return "NVPTXISD::Tex3DU32FloatGrad";
- case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
- case NVPTXISD::TexCubeFloatFloatLevel:
- return "NVPTXISD::TexCubeFloatFloatLevel";
- case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
- case NVPTXISD::TexCubeS32FloatLevel:
- return "NVPTXISD::TexCubeS32FloatLevel";
- case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
- case NVPTXISD::TexCubeU32FloatLevel:
- return "NVPTXISD::TexCubeU32FloatLevel";
- case NVPTXISD::TexCubeArrayFloatFloat:
- return "NVPTXISD::TexCubeArrayFloatFloat";
- case NVPTXISD::TexCubeArrayFloatFloatLevel:
- return "NVPTXISD::TexCubeArrayFloatFloatLevel";
- case NVPTXISD::TexCubeArrayS32Float:
- return "NVPTXISD::TexCubeArrayS32Float";
- case NVPTXISD::TexCubeArrayS32FloatLevel:
- return "NVPTXISD::TexCubeArrayS32FloatLevel";
- case NVPTXISD::TexCubeArrayU32Float:
- return "NVPTXISD::TexCubeArrayU32Float";
- case NVPTXISD::TexCubeArrayU32FloatLevel:
- return "NVPTXISD::TexCubeArrayU32FloatLevel";
- case NVPTXISD::Tld4R2DFloatFloat:
- return "NVPTXISD::Tld4R2DFloatFloat";
- case NVPTXISD::Tld4G2DFloatFloat:
- return "NVPTXISD::Tld4G2DFloatFloat";
- case NVPTXISD::Tld4B2DFloatFloat:
- return "NVPTXISD::Tld4B2DFloatFloat";
- case NVPTXISD::Tld4A2DFloatFloat:
- return "NVPTXISD::Tld4A2DFloatFloat";
- case NVPTXISD::Tld4R2DS64Float:
- return "NVPTXISD::Tld4R2DS64Float";
- case NVPTXISD::Tld4G2DS64Float:
- return "NVPTXISD::Tld4G2DS64Float";
- case NVPTXISD::Tld4B2DS64Float:
- return "NVPTXISD::Tld4B2DS64Float";
- case NVPTXISD::Tld4A2DS64Float:
- return "NVPTXISD::Tld4A2DS64Float";
- case NVPTXISD::Tld4R2DU64Float:
- return "NVPTXISD::Tld4R2DU64Float";
- case NVPTXISD::Tld4G2DU64Float:
- return "NVPTXISD::Tld4G2DU64Float";
- case NVPTXISD::Tld4B2DU64Float:
- return "NVPTXISD::Tld4B2DU64Float";
- case NVPTXISD::Tld4A2DU64Float:
- return "NVPTXISD::Tld4A2DU64Float";
-
- case NVPTXISD::TexUnified1DFloatS32:
- return "NVPTXISD::TexUnified1DFloatS32";
- case NVPTXISD::TexUnified1DFloatFloat:
- return "NVPTXISD::TexUnified1DFloatFloat";
- case NVPTXISD::TexUnified1DFloatFloatLevel:
- return "NVPTXISD::TexUnified1DFloatFloatLevel";
- case NVPTXISD::TexUnified1DFloatFloatGrad:
- return "NVPTXISD::TexUnified1DFloatFloatGrad";
- case NVPTXISD::TexUnified1DS32S32:
- return "NVPTXISD::TexUnified1DS32S32";
- case NVPTXISD::TexUnified1DS32Float:
- return "NVPTXISD::TexUnified1DS32Float";
- case NVPTXISD::TexUnified1DS32FloatLevel:
- return "NVPTXISD::TexUnified1DS32FloatLevel";
- case NVPTXISD::TexUnified1DS32FloatGrad:
- return "NVPTXISD::TexUnified1DS32FloatGrad";
- case NVPTXISD::TexUnified1DU32S32:
- return "NVPTXISD::TexUnified1DU32S32";
- case NVPTXISD::TexUnified1DU32Float:
- return "NVPTXISD::TexUnified1DU32Float";
- case NVPTXISD::TexUnified1DU32FloatLevel:
- return "NVPTXISD::TexUnified1DU32FloatLevel";
- case NVPTXISD::TexUnified1DU32FloatGrad:
- return "NVPTXISD::TexUnified1DU32FloatGrad";
- case NVPTXISD::TexUnified1DArrayFloatS32:
- return "NVPTXISD::TexUnified1DArrayFloatS32";
- case NVPTXISD::TexUnified1DArrayFloatFloat:
- return "NVPTXISD::TexUnified1DArrayFloatFloat";
- case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
- return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
- case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
- return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
- case NVPTXISD::TexUnified1DArrayS32S32:
- return "NVPTXISD::TexUnified1DArrayS32S32";
- case NVPTXISD::TexUnified1DArrayS32Float:
- return "NVPTXISD::TexUnified1DArrayS32Float";
- case NVPTXISD::TexUnified1DArrayS32FloatLevel:
- return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
- case NVPTXISD::TexUnified1DArrayS32FloatGrad:
- return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
- case NVPTXISD::TexUnified1DArrayU32S32:
- return "NVPTXISD::TexUnified1DArrayU32S32";
- case NVPTXISD::TexUnified1DArrayU32Float:
- return "NVPTXISD::TexUnified1DArrayU32Float";
- case NVPTXISD::TexUnified1DArrayU32FloatLevel:
- return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
- case NVPTXISD::TexUnified1DArrayU32FloatGrad:
- return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
- case NVPTXISD::TexUnified2DFloatS32:
- return "NVPTXISD::TexUnified2DFloatS32";
- case NVPTXISD::TexUnified2DFloatFloat:
- return "NVPTXISD::TexUnified2DFloatFloat";
- case NVPTXISD::TexUnified2DFloatFloatLevel:
- return "NVPTXISD::TexUnified2DFloatFloatLevel";
- case NVPTXISD::TexUnified2DFloatFloatGrad:
- return "NVPTXISD::TexUnified2DFloatFloatGrad";
- case NVPTXISD::TexUnified2DS32S32:
- return "NVPTXISD::TexUnified2DS32S32";
- case NVPTXISD::TexUnified2DS32Float:
- return "NVPTXISD::TexUnified2DS32Float";
- case NVPTXISD::TexUnified2DS32FloatLevel:
- return "NVPTXISD::TexUnified2DS32FloatLevel";
- case NVPTXISD::TexUnified2DS32FloatGrad:
- return "NVPTXISD::TexUnified2DS32FloatGrad";
- case NVPTXISD::TexUnified2DU32S32:
- return "NVPTXISD::TexUnified2DU32S32";
- case NVPTXISD::TexUnified2DU32Float:
- return "NVPTXISD::TexUnified2DU32Float";
- case NVPTXISD::TexUnified2DU32FloatLevel:
- return "NVPTXISD::TexUnified2DU32FloatLevel";
- case NVPTXISD::TexUnified2DU32FloatGrad:
- return "NVPTXISD::TexUnified2DU32FloatGrad";
- case NVPTXISD::TexUnified2DArrayFloatS32:
- return "NVPTXISD::TexUnified2DArrayFloatS32";
- case NVPTXISD::TexUnified2DArrayFloatFloat:
- return "NVPTXISD::TexUnified2DArrayFloatFloat";
- case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
- return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
- case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
- return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
- case NVPTXISD::TexUnified2DArrayS32S32:
- return "NVPTXISD::TexUnified2DArrayS32S32";
- case NVPTXISD::TexUnified2DArrayS32Float:
- return "NVPTXISD::TexUnified2DArrayS32Float";
- case NVPTXISD::TexUnified2DArrayS32FloatLevel:
- return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
- case NVPTXISD::TexUnified2DArrayS32FloatGrad:
- return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
- case NVPTXISD::TexUnified2DArrayU32S32:
- return "NVPTXISD::TexUnified2DArrayU32S32";
- case NVPTXISD::TexUnified2DArrayU32Float:
- return "NVPTXISD::TexUnified2DArrayU32Float";
- case NVPTXISD::TexUnified2DArrayU32FloatLevel:
- return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
- case NVPTXISD::TexUnified2DArrayU32FloatGrad:
- return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
- case NVPTXISD::TexUnified3DFloatS32:
- return "NVPTXISD::TexUnified3DFloatS32";
- case NVPTXISD::TexUnified3DFloatFloat:
- return "NVPTXISD::TexUnified3DFloatFloat";
- case NVPTXISD::TexUnified3DFloatFloatLevel:
- return "NVPTXISD::TexUnified3DFloatFloatLevel";
- case NVPTXISD::TexUnified3DFloatFloatGrad:
- return "NVPTXISD::TexUnified3DFloatFloatGrad";
- case NVPTXISD::TexUnified3DS32S32:
- return "NVPTXISD::TexUnified3DS32S32";
- case NVPTXISD::TexUnified3DS32Float:
- return "NVPTXISD::TexUnified3DS32Float";
- case NVPTXISD::TexUnified3DS32FloatLevel:
- return "NVPTXISD::TexUnified3DS32FloatLevel";
- case NVPTXISD::TexUnified3DS32FloatGrad:
- return "NVPTXISD::TexUnified3DS32FloatGrad";
- case NVPTXISD::TexUnified3DU32S32:
- return "NVPTXISD::TexUnified3DU32S32";
- case NVPTXISD::TexUnified3DU32Float:
- return "NVPTXISD::TexUnified3DU32Float";
- case NVPTXISD::TexUnified3DU32FloatLevel:
- return "NVPTXISD::TexUnified3DU32FloatLevel";
- case NVPTXISD::TexUnified3DU32FloatGrad:
- return "NVPTXISD::TexUnified3DU32FloatGrad";
- case NVPTXISD::TexUnifiedCubeFloatFloat:
- return "NVPTXISD::TexUnifiedCubeFloatFloat";
- case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
- return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
- case NVPTXISD::TexUnifiedCubeS32Float:
- return "NVPTXISD::TexUnifiedCubeS32Float";
- case NVPTXISD::TexUnifiedCubeS32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
- case NVPTXISD::TexUnifiedCubeU32Float:
- return "NVPTXISD::TexUnifiedCubeU32Float";
- case NVPTXISD::TexUnifiedCubeU32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
- case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayS32Float:
- return "NVPTXISD::TexUnifiedCubeArrayS32Float";
- case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
- case NVPTXISD::TexUnifiedCubeArrayU32Float:
- return "NVPTXISD::TexUnifiedCubeArrayU32Float";
- case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
- return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
- case NVPTXISD::Tld4UnifiedR2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
- case NVPTXISD::Tld4UnifiedG2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
- case NVPTXISD::Tld4UnifiedB2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
- case NVPTXISD::Tld4UnifiedA2DFloatFloat:
- return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
- case NVPTXISD::Tld4UnifiedR2DS64Float:
- return "NVPTXISD::Tld4UnifiedR2DS64Float";
- case NVPTXISD::Tld4UnifiedG2DS64Float:
- return "NVPTXISD::Tld4UnifiedG2DS64Float";
- case NVPTXISD::Tld4UnifiedB2DS64Float:
- return "NVPTXISD::Tld4UnifiedB2DS64Float";
- case NVPTXISD::Tld4UnifiedA2DS64Float:
- return "NVPTXISD::Tld4UnifiedA2DS64Float";
- case NVPTXISD::Tld4UnifiedR2DU64Float:
- return "NVPTXISD::Tld4UnifiedR2DU64Float";
- case NVPTXISD::Tld4UnifiedG2DU64Float:
- return "NVPTXISD::Tld4UnifiedG2DU64Float";
- case NVPTXISD::Tld4UnifiedB2DU64Float:
- return "NVPTXISD::Tld4UnifiedB2DU64Float";
- case NVPTXISD::Tld4UnifiedA2DU64Float:
- return "NVPTXISD::Tld4UnifiedA2DU64Float";
-
- case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
- case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
- case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
- case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
- case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
- case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
- case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
- case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
- case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
- case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
- case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
-
- case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
- case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
- case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
- case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
- case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
- case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
- case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
- case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
- case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
- case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
- case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
-
- case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
- case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
- case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
- case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
- case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
- case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
- case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
- case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
- case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
- case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
- case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
-
- case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
- case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
- case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
- case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
- case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
- case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
- case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
- case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
- case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
- case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
- case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
-
- case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
- case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
- case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
- case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
- case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
- case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
- case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
- case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
- case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
- case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
- case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
-
- case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
- case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
- case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
- case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
- case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
- case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
- case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
- case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
- case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
- case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
- case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
-
- case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
- case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
- case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
- case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
- case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
- case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
- case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
- case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
- case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
- case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
- case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
-
- case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
- case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
- case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
- case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
- case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
- case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
- case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
- case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
- case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
- case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
- case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
-
- case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
- case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
- case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
- case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
- case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
- case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
- case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
- case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
- case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
- case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
- case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
-
- case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
- case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
- case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
- case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
- case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
- case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
- case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
- case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
- case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
- case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
- case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
-
- case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
- case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
- case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
- case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
- case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
- case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
- case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
- case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
- case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
- case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
- case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
-
- case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
- case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
- case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
- case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
- case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
- case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
- case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
- case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
- case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
- case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
- case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
-
- case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
- case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
- case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
- case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
- case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
- case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
- case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
- case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
- case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
- case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
- case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
-
- case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
- case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
- case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
- case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
- case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
- case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
- case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
- case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
- case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
- case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
- case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
-
- case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
- case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
- case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
- case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
- case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
- case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
- case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
- case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
- case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
- case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
- case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
- }
- return nullptr;
-}
-
-TargetLoweringBase::LegalizeTypeAction
-NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
- if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
- return TypeSplitVector;
- if (VT == MVT::v2f16)
- return TypeLegal;
- return TargetLoweringBase::getPreferredVectorAction(VT);
-}
-
-SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
- int Enabled, int &ExtraSteps,
- bool &UseOneConst,
- bool Reciprocal) const {
- if (!(Enabled == ReciprocalEstimate::Enabled ||
- (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
- return SDValue();
-
- if (ExtraSteps == ReciprocalEstimate::Unspecified)
- ExtraSteps = 0;
-
- SDLoc DL(Operand);
- EVT VT = Operand.getValueType();
- bool Ftz = useF32FTZ(DAG.getMachineFunction());
-
- auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(IID, DL, MVT::i32), Operand);
- };
-
- // The sqrt and rsqrt refinement processes assume we always start out with an
- // approximation of the rsqrt. Therefore, if we're going to do any refinement
- // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
- // any refinement, we must return a regular sqrt.
- if (Reciprocal || ExtraSteps > 0) {
- if (VT == MVT::f32)
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
- : Intrinsic::nvvm_rsqrt_approx_f);
- else if (VT == MVT::f64)
- return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
- else
- return SDValue();
- } else {
- if (VT == MVT::f32)
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
- : Intrinsic::nvvm_sqrt_approx_f);
- else {
- // There's no sqrt.approx.f64 instruction, so we emit
- // reciprocal(rsqrt(x)). This is faster than
- // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
- // x * rsqrt(x).)
- return DAG.getNode(
- ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
- MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
- }
- }
-}
-
-SDValue
-NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
- SDLoc dl(Op);
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- auto PtrVT = getPointerTy(DAG.getDataLayout());
- Op = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
- return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
-}
-
-std::string NVPTXTargetLowering::getPrototype(
- const DataLayout &DL, Type *retTy, const ArgListTy &Args,
- const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
- const ImmutableCallSite *CS) const {
- auto PtrVT = getPointerTy(DL);
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return "";
-
- std::stringstream O;
- O << "prototype_" << uniqueCallSite << " : .callprototype ";
-
- if (retTy->getTypeID() == Type::VoidTyID) {
- O << "()";
- } else {
- O << "(";
- if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
- unsigned size = 0;
- if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
- size = ITy->getBitWidth();
- } else {
- assert(retTy->isFloatingPointTy() &&
- "Floating point type expected here");
- size = retTy->getPrimitiveSizeInBits();
- }
- // PTX ABI requires all scalar return values to be at least 32
- // bits in size. fp16 normally uses .b16 as its storage type in
- // PTX, so its size must be adjusted here, too.
- if (size < 32)
- size = 32;
-
- O << ".param .b" << size << " _";
- } else if (isa<PointerType>(retTy)) {
- O << ".param .b" << PtrVT.getSizeInBits() << " _";
- } else if (retTy->isAggregateType() || retTy->isVectorTy()) {
- auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
- O << ".param .align " << retAlignment << " .b8 _["
- << DL.getTypeAllocSize(retTy) << "]";
- } else {
- llvm_unreachable("Unknown return type");
- }
- O << ") ";
- }
- O << "_ (";
-
- bool first = true;
-
- unsigned OIdx = 0;
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
- Type *Ty = Args[i].Ty;
- if (!first) {
- O << ", ";
- }
- first = false;
-
- if (!Outs[OIdx].Flags.isByVal()) {
- if (Ty->isAggregateType() || Ty->isVectorTy()) {
- unsigned align = 0;
- const CallInst *CallI = cast<CallInst>(CS->getInstruction());
- // +1 because index 0 is reserved for return type alignment
- if (!getAlign(*CallI, i + 1, align))
- align = DL.getABITypeAlignment(Ty);
- unsigned sz = DL.getTypeAllocSize(Ty);
- O << ".param .align " << align << " .b8 ";
- O << "_";
- O << "[" << sz << "]";
- // update the index for Outs
- SmallVector<EVT, 16> vtparts;
- ComputeValueVTs(*this, DL, Ty, vtparts);
- if (unsigned len = vtparts.size())
- OIdx += len - 1;
- continue;
- }
- // i8 types in IR will be i16 types in SDAG
- assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
- (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
- "type mismatch between callee prototype and arguments");
- // scalar type
- unsigned sz = 0;
- if (isa<IntegerType>(Ty)) {
- sz = cast<IntegerType>(Ty)->getBitWidth();
- if (sz < 32)
- sz = 32;
- } else if (isa<PointerType>(Ty)) {
- sz = PtrVT.getSizeInBits();
- } else if (Ty->isHalfTy())
- // PTX ABI requires all scalar parameters to be at least 32
- // bits in size. fp16 normally uses .b16 as its storage type
- // in PTX, so its size must be adjusted here, too.
- sz = 32;
- else
- sz = Ty->getPrimitiveSizeInBits();
- O << ".param .b" << sz << " ";
- O << "_";
- continue;
- }
- auto *PTy = dyn_cast<PointerType>(Ty);
- assert(PTy && "Param with byval attribute should be a pointer type");
- Type *ETy = PTy->getElementType();
-
- unsigned align = Outs[OIdx].Flags.getByValAlign();
- unsigned sz = DL.getTypeAllocSize(ETy);
- O << ".param .align " << align << " .b8 ";
- O << "_";
- O << "[" << sz << "]";
- }
- O << ");";
- return O.str();
-}
-
-unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
- const ImmutableCallSite *CS,
- Type *Ty, unsigned Idx,
- const DataLayout &DL) const {
- if (!CS) {
- // CallSite is zero, fallback to ABI type alignment
- return DL.getABITypeAlignment(Ty);
- }
-
- unsigned Align = 0;
- const Value *DirectCallee = CS->getCalledFunction();
-
- if (!DirectCallee) {
- // We don't have a direct function symbol, but that may be because of
- // constant cast instructions in the call.
- const Instruction *CalleeI = CS->getInstruction();
- assert(CalleeI && "Call target is not a function or derived value?");
-
- // With bitcast'd call targets, the instruction will be the call
- if (isa<CallInst>(CalleeI)) {
- // Check if we have call alignment metadata
- if (getAlign(*cast<CallInst>(CalleeI), Idx, Align))
- return Align;
-
- const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
- // Ignore any bitcast instructions
- while (isa<ConstantExpr>(CalleeV)) {
- const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
- if (!CE->isCast())
- break;
- // Look through the bitcast
- CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
- }
-
- // We have now looked past all of the bitcasts. Do we finally have a
- // Function?
- if (isa<Function>(CalleeV))
- DirectCallee = CalleeV;
- }
- }
-
- // Check for function alignment information if we found that the
- // ultimate target is a Function
- if (DirectCallee)
- if (getAlign(*cast<Function>(DirectCallee), Idx, Align))
- return Align;
-
- // Call is indirect or alignment information is not available, fall back to
- // the ABI type alignment
- return DL.getABITypeAlignment(Ty);
-}
-
-SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- SelectionDAG &DAG = CLI.DAG;
- SDLoc dl = CLI.DL;
- SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
- SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
- SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
- bool &isTailCall = CLI.IsTailCall;
- ArgListTy &Args = CLI.getArgs();
- Type *RetTy = CLI.RetTy;
- ImmutableCallSite *CS = CLI.CS;
- const DataLayout &DL = DAG.getDataLayout();
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- SDValue tempChain = Chain;
- Chain = DAG.getCALLSEQ_START(
- Chain, DAG.getIntPtrConstant(uniqueCallSite, dl, true), dl);
- SDValue InFlag = Chain.getValue(1);
-
- unsigned paramCount = 0;
- // Args.size() and Outs.size() need not match.
- // Outs.size() will be larger
- // * if there is an aggregate argument with multiple fields (each field
- // showing up separately in Outs)
- // * if there is a vector argument with more than typical vector-length
- // elements (generally if more than 4) where each vector element is
- // individually present in Outs.
- // So a different index should be used for indexing into Outs/OutVals.
- // See similar issue in LowerFormalArguments.
- unsigned OIdx = 0;
- // Declare the .params or .reg need to pass values
- // to the function
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
- EVT VT = Outs[OIdx].VT;
- Type *Ty = Args[i].Ty;
-
- if (!Outs[OIdx].Flags.isByVal()) {
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
- unsigned ArgAlign =
- getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
- unsigned AllocSize = DL.getTypeAllocSize(Ty);
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- bool NeedAlign; // Does argument declaration specify alignment?
- if (Ty->isAggregateType() || Ty->isVectorTy()) {
- // declare .param .align <align> .b8 .param<n>[<size>];
- SDValue DeclareParamOps[] = {
- Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps);
- NeedAlign = true;
- } else {
- // declare .param .b<size> .param<n>;
- if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
- // PTX ABI requires integral types to be at least 32 bits in
- // size. FP16 is loaded/stored using i16, so it's handled
- // here as well.
- AllocSize = 4;
- }
- SDValue DeclareScalarParamOps[] = {
- Chain, DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(AllocSize * 8, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
- DeclareScalarParamOps);
- NeedAlign = false;
- }
- InFlag = Chain.getValue(1);
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
- // than 32-bits are sign extended or zero extended, depending on
- // whether they are signed or unsigned types. This case applies
- // only to scalar parameters and not to aggregate values.
- bool ExtendIntegerParam =
- Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
-
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
- SmallVector<SDValue, 6> StoreOperands;
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
- // New store.
- if (VectorInfo[j] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Unfinished preceeding store.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
- StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
- }
-
- EVT EltVT = VTs[j];
- SDValue StVal = OutVals[OIdx];
- if (ExtendIntegerParam) {
- assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
- // zext/sext to i32
- StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND,
- dl, MVT::i32, StVal);
- } else if (EltVT.getSizeInBits() < 16) {
- // Use 16-bit registers for small stores as it's the
- // smallest general purpose register size supported by NVPTX.
- StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
- }
-
- // Record the value to store.
- StoreOperands.push_back(StVal);
-
- if (VectorInfo[j] & PVF_LAST) {
- unsigned NumElts = StoreOperands.size() - 3;
- NVPTXISD::NodeType Op;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::StoreParam;
- break;
- case 2:
- Op = NVPTXISD::StoreParamV2;
- break;
- case 4:
- Op = NVPTXISD::StoreParamV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- StoreOperands.push_back(InFlag);
-
- // Adjust type of the store op if we've extended the scalar
- // return value.
- EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
- unsigned EltAlign =
- NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
-
- Chain = DAG.getMemIntrinsicNode(
- Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
- TheStoreType, MachinePointerInfo(), EltAlign);
- InFlag = Chain.getValue(1);
-
- // Cleanup.
- StoreOperands.clear();
- }
- ++OIdx;
- }
- assert(StoreOperands.empty() && "Unfinished parameter store.");
- if (VTs.size() > 0)
- --OIdx;
- ++paramCount;
- continue;
- }
-
- // ByVal arguments
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
- assert(PTy && "Type of a byval parameter should be pointer");
- ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
-
- // declare .param .align <align> .b8 .param<n>[<size>];
- unsigned sz = Outs[OIdx].Flags.getByValSize();
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
- // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
- // so we don't need to worry about natural alignment or not.
- // See TargetLowering::LowerCallTo().
-
- // Enforce minumum alignment of 4 to work around ptxas miscompile
- // for sm_50+. See corresponding alignment adjustment in
- // emitFunctionParamList() for details.
- if (ArgAlign < 4)
- ArgAlign = 4;
- SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(sz, dl, MVT::i32), InFlag};
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
- DeclareParamOps);
- InFlag = Chain.getValue(1);
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
- EVT elemtype = VTs[j];
- int curOffset = Offsets[j];
- unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
- auto PtrVT = getPointerTy(DL);
- SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
- DAG.getConstant(curOffset, dl, PtrVT));
- SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), PartAlign);
- if (elemtype.getSizeInBits() < 16) {
- theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
- }
- SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain,
- DAG.getConstant(paramCount, dl, MVT::i32),
- DAG.getConstant(curOffset, dl, MVT::i32),
- theVal, InFlag };
- Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
- CopyParamOps, elemtype,
- MachinePointerInfo());
-
- InFlag = Chain.getValue(1);
- }
- ++paramCount;
- }
-
- GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
- unsigned retAlignment = 0;
-
- // Handle Result
- if (Ins.size() > 0) {
- SmallVector<EVT, 16> resvtparts;
- ComputeValueVTs(*this, DL, RetTy, resvtparts);
-
- // Declare
- // .param .align 16 .b8 retval0[<size-in-bytes>], or
- // .param .b<size-in-bits> retval0
- unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
- // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
- // these three types to match the logic in
- // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
- // Plus, this behavior is consistent with nvcc's.
- if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||
- RetTy->isPointerTy()) {
- // Scalar needs to be at least 32bit wide
- if (resultsz < 32)
- resultsz = 32;
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(resultsz, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
- DeclareRetOps);
- InFlag = Chain.getValue(1);
- } else {
- retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain,
- DAG.getConstant(retAlignment, dl, MVT::i32),
- DAG.getConstant(resultsz / 8, dl, MVT::i32),
- DAG.getConstant(0, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
- DeclareRetOps);
- InFlag = Chain.getValue(1);
- }
- }
-
- if (!Func) {
- // This is indirect function call case : PTX requires a prototype of the
- // form
- // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
- // to be emitted, and the label has to used as the last arg of call
- // instruction.
- // The prototype is embedded in a string and put as the operand for a
- // CallPrototype SDNode which will print out to the value of the string.
- SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
- const char *ProtoStr =
- nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
- SDValue ProtoOps[] = {
- Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
- };
- Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
- InFlag = Chain.getValue(1);
- }
- // Op to just print "call"
- SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrintCallOps[] = {
- Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
- };
- // We model convergent calls as separate opcodes.
- unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
- if (CLI.IsConvergent)
- Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
- : NVPTXISD::PrintConvergentCall;
- Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
- InFlag = Chain.getValue(1);
-
- // Ops to print out the function name
- SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallVoidOps[] = { Chain, Callee, InFlag };
- Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
- InFlag = Chain.getValue(1);
-
- // Ops to print out the param list
- SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgBeginOps[] = { Chain, InFlag };
- Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
- CallArgBeginOps);
- InFlag = Chain.getValue(1);
-
- for (unsigned i = 0, e = paramCount; i != e; ++i) {
- unsigned opcode;
- if (i == (e - 1))
- opcode = NVPTXISD::LastCallArg;
- else
- opcode = NVPTXISD::CallArg;
- SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(i, dl, MVT::i32), InFlag };
- Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
- InFlag = Chain.getValue(1);
- }
- SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgEndOps[] = { Chain,
- DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
- InFlag };
- Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
- InFlag = Chain.getValue(1);
-
- if (!Func) {
- SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrototypeOps[] = { Chain,
- DAG.getConstant(uniqueCallSite, dl, MVT::i32),
- InFlag };
- Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
- InFlag = Chain.getValue(1);
- }
-
- // Generate loads from param memory/moves from registers for result
- if (Ins.size() > 0) {
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
- assert(VTs.size() == Ins.size() && "Bad value decomposition");
-
- unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
-
- SmallVector<EVT, 6> LoadVTs;
- int VecIdx = -1; // Index of the first element of the vector.
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
- // 32-bits are sign extended or zero extended, depending on whether
- // they are signed or unsigned types.
- bool ExtendIntegerRetVal =
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
-
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- bool needTruncate = false;
- EVT TheLoadType = VTs[i];
- EVT EltType = Ins[i].VT;
- unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
- if (ExtendIntegerRetVal) {
- TheLoadType = MVT::i32;
- EltType = MVT::i32;
- needTruncate = true;
- } else if (TheLoadType.getSizeInBits() < 16) {
- if (VTs[i].isInteger())
- needTruncate = true;
- EltType = MVT::i16;
- }
-
- // Record index of the very first element of the vector.
- if (VectorInfo[i] & PVF_FIRST) {
- assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
- VecIdx = i;
- }
-
- LoadVTs.push_back(EltType);
-
- if (VectorInfo[i] & PVF_LAST) {
- unsigned NumElts = LoadVTs.size();
- LoadVTs.push_back(MVT::Other);
- LoadVTs.push_back(MVT::Glue);
- NVPTXISD::NodeType Op;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::LoadParam;
- break;
- case 2:
- Op = NVPTXISD::LoadParamV2;
- break;
- case 4:
- Op = NVPTXISD::LoadParamV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- SDValue LoadOperands[] = {
- Chain, DAG.getConstant(1, dl, MVT::i32),
- DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
- SDValue RetVal = DAG.getMemIntrinsicNode(
- Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
- MachinePointerInfo(), EltAlign);
-
- for (unsigned j = 0; j < NumElts; ++j) {
- SDValue Ret = RetVal.getValue(j);
- if (needTruncate)
- Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
- InVals.push_back(Ret);
- }
- Chain = RetVal.getValue(NumElts);
- InFlag = RetVal.getValue(NumElts + 1);
-
- // Cleanup
- VecIdx = -1;
- LoadVTs.clear();
- }
- }
- }
-
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(uniqueCallSite, dl, true),
- DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
- true),
- InFlag, dl);
- uniqueCallSite++;
-
- // set isTailCall to false for now, until we figure out how to express
- // tail call optimization in PTX
- isTailCall = false;
- return Chain;
-}
-
-// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
-// (see LegalizeDAG.cpp). This is slow and uses local memory.
-// We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
-SDValue
-NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- SmallVector<SDValue, 8> Ops;
- unsigned NumOperands = Node->getNumOperands();
- for (unsigned i = 0; i < NumOperands; ++i) {
- SDValue SubOp = Node->getOperand(i);
- EVT VVT = SubOp.getNode()->getValueType(0);
- EVT EltVT = VVT.getVectorElementType();
- unsigned NumSubElem = VVT.getVectorNumElements();
- for (unsigned j = 0; j < NumSubElem; ++j) {
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
- DAG.getIntPtrConstant(j, dl)));
- }
- }
- return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
-}
-
-// We can init constant f16x2 with a single .b32 move. Normally it
-// would get lowered as two constant loads and vector-packing move.
-// mov.b16 %h1, 0x4000;
-// mov.b16 %h2, 0x3C00;
-// mov.b32 %hh2, {%h2, %h1};
-// Instead we want just a constant move:
-// mov.b32 %hh2, 0x40003C00
-//
-// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
-// generates good SASS in both cases.
-SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
- SelectionDAG &DAG) const {
- //return Op;
- if (!(Op->getValueType(0) == MVT::v2f16 &&
- isa<ConstantFPSDNode>(Op->getOperand(0)) &&
- isa<ConstantFPSDNode>(Op->getOperand(1))))
- return Op;
-
- APInt E0 =
- cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
- APInt E1 =
- cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
- SDValue Const =
- DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
- return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);
-}
-
-SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue Index = Op->getOperand(1);
- // Constant index will be matched by tablegen.
- if (isa<ConstantSDNode>(Index.getNode()))
- return Op;
-
- // Extract individual elements and select one of them.
- SDValue Vector = Op->getOperand(0);
- EVT VectorVT = Vector.getValueType();
- assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");
- EVT EltVT = VectorVT.getVectorElementType();
-
- SDLoc dl(Op.getNode());
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
- DAG.getIntPtrConstant(0, dl));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
- DAG.getIntPtrConstant(1, dl));
- return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
- ISD::CondCode::SETEQ);
-}
-
-/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
-/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
-/// amount, or
-/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
-/// amount.
-SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
-
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- SDLoc dl(Op);
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
- unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
-
- if (VTBits == 32 && STI.getSmVersion() >= 35) {
- // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
- // {dHi, dLo} = {aHi, aLo} >> Amt
- // dHi = aHi >> Amt
- // dLo = shf.r.clamp aLo, aHi, Amt
-
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
- ShAmt);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
- else {
- // {dHi, dLo} = {aHi, aLo} >> Amt
- // - if (Amt>=size) then
- // dLo = aHi >> (Amt-size)
- // dHi = aHi >> Amt (this is either all 0 or all 1)
- // else
- // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
- // dHi = aHi >> Amt
-
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
-
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ISD::SETGE);
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
-}
-
-/// LowerShiftLeftParts - Lower SHL_PARTS, which
-/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
-/// amount, or
-/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
-/// amount.
-SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
- SelectionDAG &DAG) const {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- assert(Op.getOpcode() == ISD::SHL_PARTS);
-
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- SDLoc dl(Op);
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
-
- if (VTBits == 32 && STI.getSmVersion() >= 35) {
- // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
- // {dHi, dLo} = {aHi, aLo} << Amt
- // dHi = shf.l.clamp aLo, aHi, Amt
- // dLo = aLo << Amt
-
- SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
- ShAmt);
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
- else {
- // {dHi, dLo} = {aHi, aLo} << Amt
- // - if (Amt>=size) then
- // dLo = aLo << Amt (all 0)
- // dLo = aLo << (Amt-size)
- // else
- // dLo = aLo << Amt
- // dHi = (aHi << Amt) | (aLo >> (size-Amt))
-
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
-
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
- DAG.getConstant(VTBits, dl, MVT::i32),
- ISD::SETGE);
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
- SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, dl);
- }
-}
-
-SDValue
-NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
- switch (Op.getOpcode()) {
- case ISD::RETURNADDR:
- return SDValue();
- case ISD::FRAMEADDR:
- return SDValue();
- case ISD::GlobalAddress:
- return LowerGlobalAddress(Op, DAG);
- case ISD::INTRINSIC_W_CHAIN:
- return Op;
- case ISD::BUILD_VECTOR:
- return LowerBUILD_VECTOR(Op, DAG);
- case ISD::EXTRACT_SUBVECTOR:
- return Op;
- case ISD::EXTRACT_VECTOR_ELT:
- return LowerEXTRACT_VECTOR_ELT(Op, DAG);
- case ISD::CONCAT_VECTORS:
- return LowerCONCAT_VECTORS(Op, DAG);
- case ISD::STORE:
- return LowerSTORE(Op, DAG);
- case ISD::LOAD:
- return LowerLOAD(Op, DAG);
- case ISD::SHL_PARTS:
- return LowerShiftLeftParts(Op, DAG);
- case ISD::SRA_PARTS:
- case ISD::SRL_PARTS:
- return LowerShiftRightParts(Op, DAG);
- case ISD::SELECT:
- return LowerSelect(Op, DAG);
- default:
- llvm_unreachable("Custom lowering not defined for operation");
- }
-}
-
-SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
- SDValue Op0 = Op->getOperand(0);
- SDValue Op1 = Op->getOperand(1);
- SDValue Op2 = Op->getOperand(2);
- SDLoc DL(Op.getNode());
-
- assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
-
- Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
- Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
- SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
-
- return Trunc;
-}
-
-SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
- if (Op.getValueType() == MVT::i1)
- return LowerLOADi1(Op, DAG);
-
- // v2f16 is legal, so we can't rely on legalizer to handle unaligned
- // loads and have to handle it here.
- if (Op.getValueType() == MVT::v2f16) {
- LoadSDNode *Load = cast<LoadSDNode>(Op);
- EVT MemVT = Load->getMemoryVT();
- if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- Load->getAddressSpace(), Load->getAlignment())) {
- SDValue Ops[2];
- std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
- return DAG.getMergeValues(Ops, SDLoc(Op));
- }
- }
-
- return SDValue();
-}
-
-// v = ld i1* addr
-// =>
-// v1 = ld i8* addr (-> i16)
-// v = trunc i16 to i1
-SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- LoadSDNode *LD = cast<LoadSDNode>(Node);
- SDLoc dl(Node);
- assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
- assert(Node->getValueType(0) == MVT::i1 &&
- "Custom lowering for i1 load only");
- SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
- LD->getPointerInfo(), LD->getAlignment(),
- LD->getMemOperand()->getFlags());
- SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
- // The legalizer (the caller) is expecting two values from the legalized
- // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
- // in LegalizeDAG.cpp which also uses MergeValues.
- SDValue Ops[] = { result, LD->getChain() };
- return DAG.getMergeValues(Ops, dl);
-}
-
-SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
- StoreSDNode *Store = cast<StoreSDNode>(Op);
- EVT VT = Store->getMemoryVT();
-
- if (VT == MVT::i1)
- return LowerSTOREi1(Op, DAG);
-
- // v2f16 is legal, so we can't rely on legalizer to handle unaligned
- // stores and have to handle it here.
- if (VT == MVT::v2f16 &&
- !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- Store->getAddressSpace(), Store->getAlignment()))
- return expandUnalignedStore(Store, DAG);
-
- if (VT.isVector())
- return LowerSTOREVector(Op, DAG);
-
- return SDValue();
-}
-
-SDValue
-NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
- SDNode *N = Op.getNode();
- SDValue Val = N->getOperand(1);
- SDLoc DL(N);
- EVT ValVT = Val.getValueType();
-
- if (ValVT.isVector()) {
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not
- // legal. We can (and should) split that into 2 stores of <2 x double> here
- // but I'm leaving that as a TODO for now.
- if (!ValVT.isSimple())
- return SDValue();
- switch (ValVT.getSimpleVT().SimpleTy) {
- default:
- return SDValue();
- case MVT::v2i8:
- case MVT::v2i16:
- case MVT::v2i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v2f32:
- case MVT::v2f64:
- case MVT::v4i8:
- case MVT::v4i16:
- case MVT::v4i32:
- case MVT::v4f16:
- case MVT::v4f32:
- case MVT::v8f16: // <4 x f16x2>
- // This is a "native" vector type
- break;
- }
-
- MemSDNode *MemSD = cast<MemSDNode>(N);
- const DataLayout &TD = DAG.getDataLayout();
-
- unsigned Align = MemSD->getAlignment();
- unsigned PrefAlign =
- TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
- if (Align < PrefAlign) {
- // This store is not sufficiently aligned, so bail out and let this vector
- // store be scalarized. Note that we may still be able to emit smaller
- // vector stores. For example, if we are storing a <4 x float> with an
- // alignment of 8, this check will fail but the legalizer will try again
- // with 2 x <2 x float>, which will succeed with an alignment of 8.
- return SDValue();
- }
-
- unsigned Opcode = 0;
- EVT EltVT = ValVT.getVectorElementType();
- unsigned NumElts = ValVT.getVectorNumElements();
-
- // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // stored type to i16 and propagate the "real" type as the memory type.
- bool NeedExt = false;
- if (EltVT.getSizeInBits() < 16)
- NeedExt = true;
-
- bool StoreF16x2 = false;
- switch (NumElts) {
- default:
- return SDValue();
- case 2:
- Opcode = NVPTXISD::StoreV2;
- break;
- case 4:
- Opcode = NVPTXISD::StoreV4;
- break;
- case 8:
- // v8f16 is a special case. PTX doesn't have st.v8.f16
- // instruction. Instead, we split the vector into v2f16 chunks and
- // store them with st.v4.b32.
- assert(EltVT == MVT::f16 && "Wrong type for the vector.");
- Opcode = NVPTXISD::StoreV4;
- StoreF16x2 = true;
- break;
- }
-
- SmallVector<SDValue, 8> Ops;
-
- // First is the chain
- Ops.push_back(N->getOperand(0));
-
- if (StoreF16x2) {
- // Combine f16,f16 -> v2f16
- NumElts /= 2;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
- DAG.getIntPtrConstant(i * 2, DL));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
- DAG.getIntPtrConstant(i * 2 + 1, DL));
- SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
- Ops.push_back(V2);
- }
- } else {
- // Then the split values
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
- DAG.getIntPtrConstant(i, DL));
- if (NeedExt)
- ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
- Ops.push_back(ExtVal);
- }
- }
-
- // Then any remaining arguments
- Ops.append(N->op_begin() + 2, N->op_end());
-
- SDValue NewSt =
- DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
- MemSD->getMemoryVT(), MemSD->getMemOperand());
-
- // return DCI.CombineTo(N, NewSt, true);
- return NewSt;
- }
-
- return SDValue();
-}
-
-// st i1 v, addr
-// =>
-// v1 = zxt v to i16
-// st.u8 i16, addr
-SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
- SDNode *Node = Op.getNode();
- SDLoc dl(Node);
- StoreSDNode *ST = cast<StoreSDNode>(Node);
- SDValue Tmp1 = ST->getChain();
- SDValue Tmp2 = ST->getBasePtr();
- SDValue Tmp3 = ST->getValue();
- assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
- Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
- SDValue Result =
- DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
- ST->getAlignment(), ST->getMemOperand()->getFlags());
- return Result;
-}
-
-SDValue
-NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
- std::string ParamSym;
- raw_string_ostream ParamStr(ParamSym);
-
- ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
- ParamStr.flush();
-
- std::string *SavedStr =
- nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
- return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
-}
-
-// Check to see if the kernel argument is image*_t or sampler_t
-
-static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
- static const char *const specialTypes[] = { "struct._image2d_t",
- "struct._image3d_t",
- "struct._sampler_t" };
-
- Type *Ty = arg->getType();
- auto *PTy = dyn_cast<PointerType>(Ty);
-
- if (!PTy)
- return false;
-
- if (!context)
- return false;
-
- auto *STy = dyn_cast<StructType>(PTy->getElementType());
- if (!STy || STy->isLiteral())
- return false;
-
- return std::find(std::begin(specialTypes), std::end(specialTypes),
- STy->getName()) != std::end(specialTypes);
-}
-
-SDValue NVPTXTargetLowering::LowerFormalArguments(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
- MachineFunction &MF = DAG.getMachineFunction();
- const DataLayout &DL = DAG.getDataLayout();
- auto PtrVT = getPointerTy(DAG.getDataLayout());
-
- const Function *F = MF.getFunction();
- const AttributeList &PAL = F->getAttributes();
- const TargetLowering *TLI = STI.getTargetLowering();
-
- SDValue Root = DAG.getRoot();
- std::vector<SDValue> OutChains;
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- std::vector<Type *> argTypes;
- std::vector<const Argument *> theArgs;
- for (const Argument &I : F->args()) {
- theArgs.push_back(&I);
- argTypes.push_back(I.getType());
- }
- // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
- // Ins.size() will be larger
- // * if there is an aggregate argument with multiple fields (each field
- // showing up separately in Ins)
- // * if there is a vector argument with more than typical vector-length
- // elements (generally if more than 4) where each vector element is
- // individually present in Ins.
- // So a different index should be used for indexing into Ins.
- // See similar issue in LowerCall.
- unsigned InsIdx = 0;
-
- int idx = 0;
- for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
- Type *Ty = argTypes[i];
-
- // If the kernel argument is image*_t or sampler_t, convert it to
- // a i32 constant holding the parameter position. This can later
- // matched in the AsmPrinter to output the correct mangled name.
- if (isImageOrSamplerVal(
- theArgs[i],
- (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
- : nullptr))) {
- assert(isKernelFunction(*F) &&
- "Only kernels can have image/sampler params");
- InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
- continue;
- }
-
- if (theArgs[i]->use_empty()) {
- // argument is dead
- if (Ty->isAggregateType()) {
- SmallVector<EVT, 16> vtparts;
-
- ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
- assert(vtparts.size() > 0 && "empty aggregate type not expected");
- for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
- ++parti) {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- ++InsIdx;
- }
- if (vtparts.size() > 0)
- --InsIdx;
- continue;
- }
- if (Ty->isVectorTy()) {
- EVT ObjectVT = getValueType(DL, Ty);
- unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
- for (unsigned parti = 0; parti < NumRegs; ++parti) {
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- ++InsIdx;
- }
- if (NumRegs > 0)
- --InsIdx;
- continue;
- }
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
- continue;
- }
-
- // In the following cases, assign a node order of "idx+1"
- // to newly created nodes. The SDNodes for params have to
- // appear in the same order as their order of appearance
- // in the original function. "idx+1" holds that order.
- if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
- bool aggregateIsPacked = false;
- if (StructType *STy = dyn_cast<StructType>(Ty))
- aggregateIsPacked = STy->isPacked();
-
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
- assert(VTs.size() > 0 && "Unexpected empty type.");
- auto VectorInfo =
- VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
-
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
- int VecIdx = -1; // Index of the first element of the current vector.
- for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
- if (VectorInfo[parti] & PVF_FIRST) {
- assert(VecIdx == -1 && "Orphaned vector.");
- VecIdx = parti;
- }
-
- // That's the last element of this store op.
- if (VectorInfo[parti] & PVF_LAST) {
- unsigned NumElts = parti - VecIdx + 1;
- EVT EltVT = VTs[parti];
- // i1 is loaded/stored as i8.
- EVT LoadVT = EltVT;
- if (EltVT == MVT::i1)
- LoadVT = MVT::i8;
- else if (EltVT == MVT::v2f16)
- // getLoad needs a vector type, but it can't handle
- // vectors which contain v2f16 elements. So we must load
- // using i32 here and then bitcast back.
- LoadVT = MVT::i32;
-
- EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
- SDValue VecAddr =
- DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
- DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
- Value *srcValue = Constant::getNullValue(PointerType::get(
- EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
- SDValue P =
- DAG.getLoad(VecVT, dl, Root, VecAddr,
- MachinePointerInfo(srcValue), aggregateIsPacked,
- MachineMemOperand::MODereferenceable |
- MachineMemOperand::MOInvariant);
- if (P.getNode())
- P.getNode()->setIROrder(idx + 1);
- for (unsigned j = 0; j < NumElts; ++j) {
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
- DAG.getIntPtrConstant(j, dl));
- // We've loaded i1 as an i8 and now must truncate it back to i1
- if (EltVT == MVT::i1)
- Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
- // v2f16 was loaded as an i32. Now we must bitcast it back.
- else if (EltVT == MVT::v2f16)
- Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
- // Extend the element if necesary (e.g. an i8 is loaded
- // into an i16 register)
- if (Ins[InsIdx].VT.isInteger() &&
- Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
- unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND;
- Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
- }
- InVals.push_back(Elt);
- }
-
- // Reset vector tracking state.
- VecIdx = -1;
- }
- ++InsIdx;
- }
- if (VTs.size() > 0)
- --InsIdx;
- continue;
- }
-
- // Param has ByVal attribute
- // Return MoveParam(param symbol).
- // Ideally, the param symbol can be returned directly,
- // but when SDNode builder decides to use it in a CopyToReg(),
- // machine instruction fails because TargetExternalSymbol
- // (not lowered) is target dependent, and CopyToReg assumes
- // the source is lowered.
- EVT ObjectVT = getValueType(DL, Ty);
- assert(ObjectVT == Ins[InsIdx].VT &&
- "Ins type did not match function type");
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
- SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
- if (p.getNode())
- p.getNode()->setIROrder(idx + 1);
- InVals.push_back(p);
- }
-
- // Clang will check explicit VarArg and issue error if any. However, Clang
- // will let code with
- // implicit var arg like f() pass. See bug 617733.
- // We treat this case as if the arg list is empty.
- // if (F.isVarArg()) {
- // assert(0 && "VarArg not supported yet!");
- //}
-
- if (!OutChains.empty())
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
-
- return Chain;
-}
-
-SDValue
-NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SDLoc &dl, SelectionDAG &DAG) const {
- MachineFunction &MF = DAG.getMachineFunction();
- Type *RetTy = MF.getFunction()->getReturnType();
-
- bool isABI = (STI.getSmVersion() >= 20);
- assert(isABI && "Non-ABI compilation is not supported");
- if (!isABI)
- return Chain;
-
- const DataLayout DL = DAG.getDataLayout();
- SmallVector<EVT, 16> VTs;
- SmallVector<uint64_t, 16> Offsets;
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
- assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
-
- auto VectorInfo = VectorizePTXValueVTs(
- VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
-
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
- // 32-bits are sign extended or zero extended, depending on whether
- // they are signed or unsigned types.
- bool ExtendIntegerRetVal =
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
-
- SmallVector<SDValue, 6> StoreOperands;
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- // New load/store. Record chain and offset operands.
- if (VectorInfo[i] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Orphaned operand list.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
- }
-
- SDValue RetVal = OutVals[i];
- if (ExtendIntegerRetVal) {
- RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
- : ISD::ZERO_EXTEND,
- dl, MVT::i32, RetVal);
- } else if (RetVal.getValueSizeInBits() < 16) {
- // Use 16-bit registers for small load-stores as it's the
- // smallest general purpose register size supported by NVPTX.
- RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
- }
-
- // Record the value to return.
- StoreOperands.push_back(RetVal);
-
- // That's the last element of this store op.
- if (VectorInfo[i] & PVF_LAST) {
- NVPTXISD::NodeType Op;
- unsigned NumElts = StoreOperands.size() - 2;
- switch (NumElts) {
- case 1:
- Op = NVPTXISD::StoreRetval;
- break;
- case 2:
- Op = NVPTXISD::StoreRetvalV2;
- break;
- case 4:
- Op = NVPTXISD::StoreRetvalV4;
- break;
- default:
- llvm_unreachable("Invalid vector info.");
- }
-
- // Adjust type of load/store op if we've extended the scalar
- // return value.
- EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
- Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
- StoreOperands, TheStoreType,
- MachinePointerInfo(), 1);
- // Cleanup vector state.
- StoreOperands.clear();
- }
- }
-
- return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
-}
-
-void NVPTXTargetLowering::LowerAsmOperandForConstraint(
- SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const {
- if (Constraint.length() > 1)
- return;
- else
- TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
-}
-
-static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
- switch (Intrinsic) {
- default:
- return 0;
-
- case Intrinsic::nvvm_tex_1d_v4f32_s32:
- return NVPTXISD::Tex1DFloatS32;
- case Intrinsic::nvvm_tex_1d_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloat;
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
- return NVPTXISD::Tex1DFloatFloatGrad;
- case Intrinsic::nvvm_tex_1d_v4s32_s32:
- return NVPTXISD::Tex1DS32S32;
- case Intrinsic::nvvm_tex_1d_v4s32_f32:
- return NVPTXISD::Tex1DS32Float;
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
- return NVPTXISD::Tex1DS32FloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
- return NVPTXISD::Tex1DS32FloatGrad;
- case Intrinsic::nvvm_tex_1d_v4u32_s32:
- return NVPTXISD::Tex1DU32S32;
- case Intrinsic::nvvm_tex_1d_v4u32_f32:
- return NVPTXISD::Tex1DU32Float;
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
- return NVPTXISD::Tex1DU32FloatLevel;
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
- return NVPTXISD::Tex1DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
- return NVPTXISD::Tex1DArrayFloatS32;
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloat;
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
- return NVPTXISD::Tex1DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
- return NVPTXISD::Tex1DArrayS32S32;
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32Float;
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
- return NVPTXISD::Tex1DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
- return NVPTXISD::Tex1DArrayU32S32;
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32Float;
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
- return NVPTXISD::Tex1DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_2d_v4f32_s32:
- return NVPTXISD::Tex2DFloatS32;
- case Intrinsic::nvvm_tex_2d_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloat;
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
- return NVPTXISD::Tex2DFloatFloatGrad;
- case Intrinsic::nvvm_tex_2d_v4s32_s32:
- return NVPTXISD::Tex2DS32S32;
- case Intrinsic::nvvm_tex_2d_v4s32_f32:
- return NVPTXISD::Tex2DS32Float;
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
- return NVPTXISD::Tex2DS32FloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
- return NVPTXISD::Tex2DS32FloatGrad;
- case Intrinsic::nvvm_tex_2d_v4u32_s32:
- return NVPTXISD::Tex2DU32S32;
- case Intrinsic::nvvm_tex_2d_v4u32_f32:
- return NVPTXISD::Tex2DU32Float;
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
- return NVPTXISD::Tex2DU32FloatLevel;
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
- return NVPTXISD::Tex2DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
- return NVPTXISD::Tex2DArrayFloatS32;
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloat;
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
- return NVPTXISD::Tex2DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
- return NVPTXISD::Tex2DArrayS32S32;
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32Float;
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
- return NVPTXISD::Tex2DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
- return NVPTXISD::Tex2DArrayU32S32;
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32Float;
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
- return NVPTXISD::Tex2DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_3d_v4f32_s32:
- return NVPTXISD::Tex3DFloatS32;
- case Intrinsic::nvvm_tex_3d_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloat;
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
- return NVPTXISD::Tex3DFloatFloatGrad;
- case Intrinsic::nvvm_tex_3d_v4s32_s32:
- return NVPTXISD::Tex3DS32S32;
- case Intrinsic::nvvm_tex_3d_v4s32_f32:
- return NVPTXISD::Tex3DS32Float;
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
- return NVPTXISD::Tex3DS32FloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
- return NVPTXISD::Tex3DS32FloatGrad;
- case Intrinsic::nvvm_tex_3d_v4u32_s32:
- return NVPTXISD::Tex3DU32S32;
- case Intrinsic::nvvm_tex_3d_v4u32_f32:
- return NVPTXISD::Tex3DU32Float;
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
- return NVPTXISD::Tex3DU32FloatLevel;
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
- return NVPTXISD::Tex3DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_cube_v4f32_f32:
- return NVPTXISD::TexCubeFloatFloat;
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
- return NVPTXISD::TexCubeFloatFloatLevel;
- case Intrinsic::nvvm_tex_cube_v4s32_f32:
- return NVPTXISD::TexCubeS32Float;
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
- return NVPTXISD::TexCubeS32FloatLevel;
- case Intrinsic::nvvm_tex_cube_v4u32_f32:
- return NVPTXISD::TexCubeU32Float;
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
- return NVPTXISD::TexCubeU32FloatLevel;
-
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
- return NVPTXISD::TexCubeArrayFloatFloat;
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
- return NVPTXISD::TexCubeArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
- return NVPTXISD::TexCubeArrayS32Float;
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
- return NVPTXISD::TexCubeArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
- return NVPTXISD::TexCubeArrayU32Float;
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
- return NVPTXISD::TexCubeArrayU32FloatLevel;
-
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
- return NVPTXISD::Tld4R2DFloatFloat;
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
- return NVPTXISD::Tld4G2DFloatFloat;
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
- return NVPTXISD::Tld4B2DFloatFloat;
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
- return NVPTXISD::Tld4A2DFloatFloat;
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
- return NVPTXISD::Tld4R2DS64Float;
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
- return NVPTXISD::Tld4G2DS64Float;
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
- return NVPTXISD::Tld4B2DS64Float;
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
- return NVPTXISD::Tld4A2DS64Float;
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
- return NVPTXISD::Tld4R2DU64Float;
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
- return NVPTXISD::Tld4G2DU64Float;
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
- return NVPTXISD::Tld4B2DU64Float;
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
- return NVPTXISD::Tld4A2DU64Float;
-
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
- return NVPTXISD::TexUnified1DFloatS32;
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloat;
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
- return NVPTXISD::TexUnified1DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
- return NVPTXISD::TexUnified1DS32S32;
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
- return NVPTXISD::TexUnified1DS32Float;
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
- return NVPTXISD::TexUnified1DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
- return NVPTXISD::TexUnified1DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
- return NVPTXISD::TexUnified1DU32S32;
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
- return NVPTXISD::TexUnified1DU32Float;
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
- return NVPTXISD::TexUnified1DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
- return NVPTXISD::TexUnified1DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
- return NVPTXISD::TexUnified1DArrayFloatS32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
- return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
- return NVPTXISD::TexUnified1DArrayS32S32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32Float;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
- return NVPTXISD::TexUnified1DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
- return NVPTXISD::TexUnified1DArrayU32S32;
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32Float;
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
- return NVPTXISD::TexUnified1DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
- return NVPTXISD::TexUnified2DFloatS32;
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloat;
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
- return NVPTXISD::TexUnified2DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
- return NVPTXISD::TexUnified2DS32S32;
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
- return NVPTXISD::TexUnified2DS32Float;
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
- return NVPTXISD::TexUnified2DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
- return NVPTXISD::TexUnified2DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
- return NVPTXISD::TexUnified2DU32S32;
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
- return NVPTXISD::TexUnified2DU32Float;
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
- return NVPTXISD::TexUnified2DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
- return NVPTXISD::TexUnified2DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
- return NVPTXISD::TexUnified2DArrayFloatS32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
- return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
- return NVPTXISD::TexUnified2DArrayS32S32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32Float;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
- return NVPTXISD::TexUnified2DArrayS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
- return NVPTXISD::TexUnified2DArrayU32S32;
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32Float;
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
- return NVPTXISD::TexUnified2DArrayU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
- return NVPTXISD::TexUnified3DFloatS32;
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloat;
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
- return NVPTXISD::TexUnified3DFloatFloatGrad;
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
- return NVPTXISD::TexUnified3DS32S32;
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
- return NVPTXISD::TexUnified3DS32Float;
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
- return NVPTXISD::TexUnified3DS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
- return NVPTXISD::TexUnified3DS32FloatGrad;
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
- return NVPTXISD::TexUnified3DU32S32;
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
- return NVPTXISD::TexUnified3DU32Float;
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
- return NVPTXISD::TexUnified3DU32FloatLevel;
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
- return NVPTXISD::TexUnified3DU32FloatGrad;
-
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeFloatFloat;
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeS32Float;
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeU32Float;
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeU32FloatLevel;
-
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
- return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeArrayS32Float;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
- return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeArrayU32Float;
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
- return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
-
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedR2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedG2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedB2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
- return NVPTXISD::Tld4UnifiedA2DFloatFloat;
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedR2DS64Float;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedG2DS64Float;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedB2DS64Float;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
- return NVPTXISD::Tld4UnifiedA2DS64Float;
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedR2DU64Float;
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedG2DU64Float;
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedB2DU64Float;
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
- return NVPTXISD::Tld4UnifiedA2DU64Float;
- }
-}
-
-static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
- switch (Intrinsic) {
- default:
- return 0;
- case Intrinsic::nvvm_suld_1d_i8_clamp:
- return NVPTXISD::Suld1DI8Clamp;
- case Intrinsic::nvvm_suld_1d_i16_clamp:
- return NVPTXISD::Suld1DI16Clamp;
- case Intrinsic::nvvm_suld_1d_i32_clamp:
- return NVPTXISD::Suld1DI32Clamp;
- case Intrinsic::nvvm_suld_1d_i64_clamp:
- return NVPTXISD::Suld1DI64Clamp;
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:
- return NVPTXISD::Suld1DV2I8Clamp;
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:
- return NVPTXISD::Suld1DV2I16Clamp;
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:
- return NVPTXISD::Suld1DV2I32Clamp;
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:
- return NVPTXISD::Suld1DV2I64Clamp;
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:
- return NVPTXISD::Suld1DV4I8Clamp;
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:
- return NVPTXISD::Suld1DV4I16Clamp;
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:
- return NVPTXISD::Suld1DV4I32Clamp;
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:
- return NVPTXISD::Suld1DArrayI8Clamp;
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:
- return NVPTXISD::Suld1DArrayI16Clamp;
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:
- return NVPTXISD::Suld1DArrayI32Clamp;
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:
- return NVPTXISD::Suld1DArrayI64Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
- return NVPTXISD::Suld1DArrayV2I8Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
- return NVPTXISD::Suld1DArrayV2I16Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
- return NVPTXISD::Suld1DArrayV2I32Clamp;
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
- return NVPTXISD::Suld1DArrayV2I64Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
- return NVPTXISD::Suld1DArrayV4I8Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
- return NVPTXISD::Suld1DArrayV4I16Clamp;
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
- return NVPTXISD::Suld1DArrayV4I32Clamp;
- case Intrinsic::nvvm_suld_2d_i8_clamp:
- return NVPTXISD::Suld2DI8Clamp;
- case Intrinsic::nvvm_suld_2d_i16_clamp:
- return NVPTXISD::Suld2DI16Clamp;
- case Intrinsic::nvvm_suld_2d_i32_clamp:
- return NVPTXISD::Suld2DI32Clamp;
- case Intrinsic::nvvm_suld_2d_i64_clamp:
- return NVPTXISD::Suld2DI64Clamp;
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:
- return NVPTXISD::Suld2DV2I8Clamp;
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:
- return NVPTXISD::Suld2DV2I16Clamp;
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:
- return NVPTXISD::Suld2DV2I32Clamp;
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:
- return NVPTXISD::Suld2DV2I64Clamp;
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:
- return NVPTXISD::Suld2DV4I8Clamp;
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:
- return NVPTXISD::Suld2DV4I16Clamp;
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:
- return NVPTXISD::Suld2DV4I32Clamp;
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:
- return NVPTXISD::Suld2DArrayI8Clamp;
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:
- return NVPTXISD::Suld2DArrayI16Clamp;
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:
- return NVPTXISD::Suld2DArrayI32Clamp;
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:
- return NVPTXISD::Suld2DArrayI64Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
- return NVPTXISD::Suld2DArrayV2I8Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
- return NVPTXISD::Suld2DArrayV2I16Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
- return NVPTXISD::Suld2DArrayV2I32Clamp;
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
- return NVPTXISD::Suld2DArrayV2I64Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
- return NVPTXISD::Suld2DArrayV4I8Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
- return NVPTXISD::Suld2DArrayV4I16Clamp;
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
- return NVPTXISD::Suld2DArrayV4I32Clamp;
- case Intrinsic::nvvm_suld_3d_i8_clamp:
- return NVPTXISD::Suld3DI8Clamp;
- case Intrinsic::nvvm_suld_3d_i16_clamp:
- return NVPTXISD::Suld3DI16Clamp;
- case Intrinsic::nvvm_suld_3d_i32_clamp:
- return NVPTXISD::Suld3DI32Clamp;
- case Intrinsic::nvvm_suld_3d_i64_clamp:
- return NVPTXISD::Suld3DI64Clamp;
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:
- return NVPTXISD::Suld3DV2I8Clamp;
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:
- return NVPTXISD::Suld3DV2I16Clamp;
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:
- return NVPTXISD::Suld3DV2I32Clamp;
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:
- return NVPTXISD::Suld3DV2I64Clamp;
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:
- return NVPTXISD::Suld3DV4I8Clamp;
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:
- return NVPTXISD::Suld3DV4I16Clamp;
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:
- return NVPTXISD::Suld3DV4I32Clamp;
- case Intrinsic::nvvm_suld_1d_i8_trap:
- return NVPTXISD::Suld1DI8Trap;
- case Intrinsic::nvvm_suld_1d_i16_trap:
- return NVPTXISD::Suld1DI16Trap;
- case Intrinsic::nvvm_suld_1d_i32_trap:
- return NVPTXISD::Suld1DI32Trap;
- case Intrinsic::nvvm_suld_1d_i64_trap:
- return NVPTXISD::Suld1DI64Trap;
- case Intrinsic::nvvm_suld_1d_v2i8_trap:
- return NVPTXISD::Suld1DV2I8Trap;
- case Intrinsic::nvvm_suld_1d_v2i16_trap:
- return NVPTXISD::Suld1DV2I16Trap;
- case Intrinsic::nvvm_suld_1d_v2i32_trap:
- return NVPTXISD::Suld1DV2I32Trap;
- case Intrinsic::nvvm_suld_1d_v2i64_trap:
- return NVPTXISD::Suld1DV2I64Trap;
- case Intrinsic::nvvm_suld_1d_v4i8_trap:
- return NVPTXISD::Suld1DV4I8Trap;
- case Intrinsic::nvvm_suld_1d_v4i16_trap:
- return NVPTXISD::Suld1DV4I16Trap;
- case Intrinsic::nvvm_suld_1d_v4i32_trap:
- return NVPTXISD::Suld1DV4I32Trap;
- case Intrinsic::nvvm_suld_1d_array_i8_trap:
- return NVPTXISD::Suld1DArrayI8Trap;
- case Intrinsic::nvvm_suld_1d_array_i16_trap:
- return NVPTXISD::Suld1DArrayI16Trap;
- case Intrinsic::nvvm_suld_1d_array_i32_trap:
- return NVPTXISD::Suld1DArrayI32Trap;
- case Intrinsic::nvvm_suld_1d_array_i64_trap:
- return NVPTXISD::Suld1DArrayI64Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
- return NVPTXISD::Suld1DArrayV2I8Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
- return NVPTXISD::Suld1DArrayV2I16Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
- return NVPTXISD::Suld1DArrayV2I32Trap;
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
- return NVPTXISD::Suld1DArrayV2I64Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
- return NVPTXISD::Suld1DArrayV4I8Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
- return NVPTXISD::Suld1DArrayV4I16Trap;
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
- return NVPTXISD::Suld1DArrayV4I32Trap;
- case Intrinsic::nvvm_suld_2d_i8_trap:
- return NVPTXISD::Suld2DI8Trap;
- case Intrinsic::nvvm_suld_2d_i16_trap:
- return NVPTXISD::Suld2DI16Trap;
- case Intrinsic::nvvm_suld_2d_i32_trap:
- return NVPTXISD::Suld2DI32Trap;
- case Intrinsic::nvvm_suld_2d_i64_trap:
- return NVPTXISD::Suld2DI64Trap;
- case Intrinsic::nvvm_suld_2d_v2i8_trap:
- return NVPTXISD::Suld2DV2I8Trap;
- case Intrinsic::nvvm_suld_2d_v2i16_trap:
- return NVPTXISD::Suld2DV2I16Trap;
- case Intrinsic::nvvm_suld_2d_v2i32_trap:
- return NVPTXISD::Suld2DV2I32Trap;
- case Intrinsic::nvvm_suld_2d_v2i64_trap:
- return NVPTXISD::Suld2DV2I64Trap;
- case Intrinsic::nvvm_suld_2d_v4i8_trap:
- return NVPTXISD::Suld2DV4I8Trap;
- case Intrinsic::nvvm_suld_2d_v4i16_trap:
- return NVPTXISD::Suld2DV4I16Trap;
- case Intrinsic::nvvm_suld_2d_v4i32_trap:
- return NVPTXISD::Suld2DV4I32Trap;
- case Intrinsic::nvvm_suld_2d_array_i8_trap:
- return NVPTXISD::Suld2DArrayI8Trap;
- case Intrinsic::nvvm_suld_2d_array_i16_trap:
- return NVPTXISD::Suld2DArrayI16Trap;
- case Intrinsic::nvvm_suld_2d_array_i32_trap:
- return NVPTXISD::Suld2DArrayI32Trap;
- case Intrinsic::nvvm_suld_2d_array_i64_trap:
- return NVPTXISD::Suld2DArrayI64Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
- return NVPTXISD::Suld2DArrayV2I8Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
- return NVPTXISD::Suld2DArrayV2I16Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
- return NVPTXISD::Suld2DArrayV2I32Trap;
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
- return NVPTXISD::Suld2DArrayV2I64Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
- return NVPTXISD::Suld2DArrayV4I8Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
- return NVPTXISD::Suld2DArrayV4I16Trap;
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
- return NVPTXISD::Suld2DArrayV4I32Trap;
- case Intrinsic::nvvm_suld_3d_i8_trap:
- return NVPTXISD::Suld3DI8Trap;
- case Intrinsic::nvvm_suld_3d_i16_trap:
- return NVPTXISD::Suld3DI16Trap;
- case Intrinsic::nvvm_suld_3d_i32_trap:
- return NVPTXISD::Suld3DI32Trap;
- case Intrinsic::nvvm_suld_3d_i64_trap:
- return NVPTXISD::Suld3DI64Trap;
- case Intrinsic::nvvm_suld_3d_v2i8_trap:
- return NVPTXISD::Suld3DV2I8Trap;
- case Intrinsic::nvvm_suld_3d_v2i16_trap:
- return NVPTXISD::Suld3DV2I16Trap;
- case Intrinsic::nvvm_suld_3d_v2i32_trap:
- return NVPTXISD::Suld3DV2I32Trap;
- case Intrinsic::nvvm_suld_3d_v2i64_trap:
- return NVPTXISD::Suld3DV2I64Trap;
- case Intrinsic::nvvm_suld_3d_v4i8_trap:
- return NVPTXISD::Suld3DV4I8Trap;
- case Intrinsic::nvvm_suld_3d_v4i16_trap:
- return NVPTXISD::Suld3DV4I16Trap;
- case Intrinsic::nvvm_suld_3d_v4i32_trap:
- return NVPTXISD::Suld3DV4I32Trap;
- case Intrinsic::nvvm_suld_1d_i8_zero:
- return NVPTXISD::Suld1DI8Zero;
- case Intrinsic::nvvm_suld_1d_i16_zero:
- return NVPTXISD::Suld1DI16Zero;
- case Intrinsic::nvvm_suld_1d_i32_zero:
- return NVPTXISD::Suld1DI32Zero;
- case Intrinsic::nvvm_suld_1d_i64_zero:
- return NVPTXISD::Suld1DI64Zero;
- case Intrinsic::nvvm_suld_1d_v2i8_zero:
- return NVPTXISD::Suld1DV2I8Zero;
- case Intrinsic::nvvm_suld_1d_v2i16_zero:
- return NVPTXISD::Suld1DV2I16Zero;
- case Intrinsic::nvvm_suld_1d_v2i32_zero:
- return NVPTXISD::Suld1DV2I32Zero;
- case Intrinsic::nvvm_suld_1d_v2i64_zero:
- return NVPTXISD::Suld1DV2I64Zero;
- case Intrinsic::nvvm_suld_1d_v4i8_zero:
- return NVPTXISD::Suld1DV4I8Zero;
- case Intrinsic::nvvm_suld_1d_v4i16_zero:
- return NVPTXISD::Suld1DV4I16Zero;
- case Intrinsic::nvvm_suld_1d_v4i32_zero:
- return NVPTXISD::Suld1DV4I32Zero;
- case Intrinsic::nvvm_suld_1d_array_i8_zero:
- return NVPTXISD::Suld1DArrayI8Zero;
- case Intrinsic::nvvm_suld_1d_array_i16_zero:
- return NVPTXISD::Suld1DArrayI16Zero;
- case Intrinsic::nvvm_suld_1d_array_i32_zero:
- return NVPTXISD::Suld1DArrayI32Zero;
- case Intrinsic::nvvm_suld_1d_array_i64_zero:
- return NVPTXISD::Suld1DArrayI64Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
- return NVPTXISD::Suld1DArrayV2I8Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
- return NVPTXISD::Suld1DArrayV2I16Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
- return NVPTXISD::Suld1DArrayV2I32Zero;
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
- return NVPTXISD::Suld1DArrayV2I64Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
- return NVPTXISD::Suld1DArrayV4I8Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
- return NVPTXISD::Suld1DArrayV4I16Zero;
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
- return NVPTXISD::Suld1DArrayV4I32Zero;
- case Intrinsic::nvvm_suld_2d_i8_zero:
- return NVPTXISD::Suld2DI8Zero;
- case Intrinsic::nvvm_suld_2d_i16_zero:
- return NVPTXISD::Suld2DI16Zero;
- case Intrinsic::nvvm_suld_2d_i32_zero:
- return NVPTXISD::Suld2DI32Zero;
- case Intrinsic::nvvm_suld_2d_i64_zero:
- return NVPTXISD::Suld2DI64Zero;
- case Intrinsic::nvvm_suld_2d_v2i8_zero:
- return NVPTXISD::Suld2DV2I8Zero;
- case Intrinsic::nvvm_suld_2d_v2i16_zero:
- return NVPTXISD::Suld2DV2I16Zero;
- case Intrinsic::nvvm_suld_2d_v2i32_zero:
- return NVPTXISD::Suld2DV2I32Zero;
- case Intrinsic::nvvm_suld_2d_v2i64_zero:
- return NVPTXISD::Suld2DV2I64Zero;
- case Intrinsic::nvvm_suld_2d_v4i8_zero:
- return NVPTXISD::Suld2DV4I8Zero;
- case Intrinsic::nvvm_suld_2d_v4i16_zero:
- return NVPTXISD::Suld2DV4I16Zero;
- case Intrinsic::nvvm_suld_2d_v4i32_zero:
- return NVPTXISD::Suld2DV4I32Zero;
- case Intrinsic::nvvm_suld_2d_array_i8_zero:
- return NVPTXISD::Suld2DArrayI8Zero;
- case Intrinsic::nvvm_suld_2d_array_i16_zero:
- return NVPTXISD::Suld2DArrayI16Zero;
- case Intrinsic::nvvm_suld_2d_array_i32_zero:
- return NVPTXISD::Suld2DArrayI32Zero;
- case Intrinsic::nvvm_suld_2d_array_i64_zero:
- return NVPTXISD::Suld2DArrayI64Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
- return NVPTXISD::Suld2DArrayV2I8Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
- return NVPTXISD::Suld2DArrayV2I16Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
- return NVPTXISD::Suld2DArrayV2I32Zero;
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
- return NVPTXISD::Suld2DArrayV2I64Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
- return NVPTXISD::Suld2DArrayV4I8Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
- return NVPTXISD::Suld2DArrayV4I16Zero;
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
- return NVPTXISD::Suld2DArrayV4I32Zero;
- case Intrinsic::nvvm_suld_3d_i8_zero:
- return NVPTXISD::Suld3DI8Zero;
- case Intrinsic::nvvm_suld_3d_i16_zero:
- return NVPTXISD::Suld3DI16Zero;
- case Intrinsic::nvvm_suld_3d_i32_zero:
- return NVPTXISD::Suld3DI32Zero;
- case Intrinsic::nvvm_suld_3d_i64_zero:
- return NVPTXISD::Suld3DI64Zero;
- case Intrinsic::nvvm_suld_3d_v2i8_zero:
- return NVPTXISD::Suld3DV2I8Zero;
- case Intrinsic::nvvm_suld_3d_v2i16_zero:
- return NVPTXISD::Suld3DV2I16Zero;
- case Intrinsic::nvvm_suld_3d_v2i32_zero:
- return NVPTXISD::Suld3DV2I32Zero;
- case Intrinsic::nvvm_suld_3d_v2i64_zero:
- return NVPTXISD::Suld3DV2I64Zero;
- case Intrinsic::nvvm_suld_3d_v4i8_zero:
- return NVPTXISD::Suld3DV4I8Zero;
- case Intrinsic::nvvm_suld_3d_v4i16_zero:
- return NVPTXISD::Suld3DV4I16Zero;
- case Intrinsic::nvvm_suld_3d_v4i32_zero:
- return NVPTXISD::Suld3DV4I32Zero;
- }
-}
-
-// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
-// TgtMemIntrinsic
-// because we need the information that is only available in the "Value" type
-// of destination
-// pointer. In particular, the address space information.
-bool NVPTXTargetLowering::getTgtMemIntrinsic(
- IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
- switch (Intrinsic) {
- default:
- return false;
-
- case Intrinsic::nvvm_atomic_load_add_f32:
- case Intrinsic::nvvm_atomic_load_inc_32:
- case Intrinsic::nvvm_atomic_load_dec_32:
-
- case Intrinsic::nvvm_atomic_add_gen_f_cta:
- case Intrinsic::nvvm_atomic_add_gen_f_sys:
- case Intrinsic::nvvm_atomic_add_gen_i_cta:
- case Intrinsic::nvvm_atomic_add_gen_i_sys:
- case Intrinsic::nvvm_atomic_and_gen_i_cta:
- case Intrinsic::nvvm_atomic_and_gen_i_sys:
- case Intrinsic::nvvm_atomic_cas_gen_i_cta:
- case Intrinsic::nvvm_atomic_cas_gen_i_sys:
- case Intrinsic::nvvm_atomic_dec_gen_i_cta:
- case Intrinsic::nvvm_atomic_dec_gen_i_sys:
- case Intrinsic::nvvm_atomic_inc_gen_i_cta:
- case Intrinsic::nvvm_atomic_inc_gen_i_sys:
- case Intrinsic::nvvm_atomic_max_gen_i_cta:
- case Intrinsic::nvvm_atomic_max_gen_i_sys:
- case Intrinsic::nvvm_atomic_min_gen_i_cta:
- case Intrinsic::nvvm_atomic_min_gen_i_sys:
- case Intrinsic::nvvm_atomic_or_gen_i_cta:
- case Intrinsic::nvvm_atomic_or_gen_i_sys:
- case Intrinsic::nvvm_atomic_exch_gen_i_cta:
- case Intrinsic::nvvm_atomic_exch_gen_i_sys:
- case Intrinsic::nvvm_atomic_xor_gen_i_cta:
- case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
- auto &DL = I.getModule()->getDataLayout();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = true;
- Info.align = 0;
- return true;
- }
-
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p: {
- auto &DL = I.getModule()->getDataLayout();
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
- Info.memVT = getValueType(DL, I.getType());
- else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)
- Info.memVT = getPointerTy(DL);
- else
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
-
- return true;
- }
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p: {
- auto &DL = I.getModule()->getDataLayout();
-
- Info.opc = ISD::INTRINSIC_W_CHAIN;
- if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
- Info.memVT = getValueType(DL, I.getType());
- else if(Intrinsic == Intrinsic::nvvm_ldg_global_p)
- Info.memVT = getPointerTy(DL);
- else
- Info.memVT = getValueType(DL, I.getType());
- Info.ptrVal = I.getArgOperand(0);
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
-
- return true;
- }
-
- case Intrinsic::nvvm_tex_1d_v4f32_s32:
- case Intrinsic::nvvm_tex_1d_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_v4f32_s32:
- case Intrinsic::nvvm_tex_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_v4f32_s32:
- case Intrinsic::nvvm_tex_3d_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
- Info.opc = getOpcForTextureInstr(Intrinsic);
- Info.memVT = MVT::v4f32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_tex_1d_v4s32_s32:
- case Intrinsic::nvvm_tex_1d_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_v4s32_s32:
- case Intrinsic::nvvm_tex_2d_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_v4s32_s32:
- case Intrinsic::nvvm_tex_3d_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_cube_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_v4u32_s32:
- case Intrinsic::nvvm_tex_1d_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_v4u32_s32:
- case Intrinsic::nvvm_tex_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_v4u32_s32:
- case Intrinsic::nvvm_tex_3d_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
- Info.opc = getOpcForTextureInstr(Intrinsic);
- Info.memVT = MVT::v4i32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i8_clamp:
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
- case Intrinsic::nvvm_suld_2d_i8_clamp:
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
- case Intrinsic::nvvm_suld_3d_i8_clamp:
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:
- case Intrinsic::nvvm_suld_1d_i8_trap:
- case Intrinsic::nvvm_suld_1d_v2i8_trap:
- case Intrinsic::nvvm_suld_1d_v4i8_trap:
- case Intrinsic::nvvm_suld_1d_array_i8_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
- case Intrinsic::nvvm_suld_2d_i8_trap:
- case Intrinsic::nvvm_suld_2d_v2i8_trap:
- case Intrinsic::nvvm_suld_2d_v4i8_trap:
- case Intrinsic::nvvm_suld_2d_array_i8_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
- case Intrinsic::nvvm_suld_3d_i8_trap:
- case Intrinsic::nvvm_suld_3d_v2i8_trap:
- case Intrinsic::nvvm_suld_3d_v4i8_trap:
- case Intrinsic::nvvm_suld_1d_i8_zero:
- case Intrinsic::nvvm_suld_1d_v2i8_zero:
- case Intrinsic::nvvm_suld_1d_v4i8_zero:
- case Intrinsic::nvvm_suld_1d_array_i8_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
- case Intrinsic::nvvm_suld_2d_i8_zero:
- case Intrinsic::nvvm_suld_2d_v2i8_zero:
- case Intrinsic::nvvm_suld_2d_v4i8_zero:
- case Intrinsic::nvvm_suld_2d_array_i8_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
- case Intrinsic::nvvm_suld_3d_i8_zero:
- case Intrinsic::nvvm_suld_3d_v2i8_zero:
- case Intrinsic::nvvm_suld_3d_v4i8_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i8;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i16_clamp:
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
- case Intrinsic::nvvm_suld_2d_i16_clamp:
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
- case Intrinsic::nvvm_suld_3d_i16_clamp:
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:
- case Intrinsic::nvvm_suld_1d_i16_trap:
- case Intrinsic::nvvm_suld_1d_v2i16_trap:
- case Intrinsic::nvvm_suld_1d_v4i16_trap:
- case Intrinsic::nvvm_suld_1d_array_i16_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
- case Intrinsic::nvvm_suld_2d_i16_trap:
- case Intrinsic::nvvm_suld_2d_v2i16_trap:
- case Intrinsic::nvvm_suld_2d_v4i16_trap:
- case Intrinsic::nvvm_suld_2d_array_i16_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
- case Intrinsic::nvvm_suld_3d_i16_trap:
- case Intrinsic::nvvm_suld_3d_v2i16_trap:
- case Intrinsic::nvvm_suld_3d_v4i16_trap:
- case Intrinsic::nvvm_suld_1d_i16_zero:
- case Intrinsic::nvvm_suld_1d_v2i16_zero:
- case Intrinsic::nvvm_suld_1d_v4i16_zero:
- case Intrinsic::nvvm_suld_1d_array_i16_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
- case Intrinsic::nvvm_suld_2d_i16_zero:
- case Intrinsic::nvvm_suld_2d_v2i16_zero:
- case Intrinsic::nvvm_suld_2d_v4i16_zero:
- case Intrinsic::nvvm_suld_2d_array_i16_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
- case Intrinsic::nvvm_suld_3d_i16_zero:
- case Intrinsic::nvvm_suld_3d_v2i16_zero:
- case Intrinsic::nvvm_suld_3d_v4i16_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i16;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i32_clamp:
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
- case Intrinsic::nvvm_suld_2d_i32_clamp:
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
- case Intrinsic::nvvm_suld_3d_i32_clamp:
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:
- case Intrinsic::nvvm_suld_1d_i32_trap:
- case Intrinsic::nvvm_suld_1d_v2i32_trap:
- case Intrinsic::nvvm_suld_1d_v4i32_trap:
- case Intrinsic::nvvm_suld_1d_array_i32_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
- case Intrinsic::nvvm_suld_2d_i32_trap:
- case Intrinsic::nvvm_suld_2d_v2i32_trap:
- case Intrinsic::nvvm_suld_2d_v4i32_trap:
- case Intrinsic::nvvm_suld_2d_array_i32_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
- case Intrinsic::nvvm_suld_3d_i32_trap:
- case Intrinsic::nvvm_suld_3d_v2i32_trap:
- case Intrinsic::nvvm_suld_3d_v4i32_trap:
- case Intrinsic::nvvm_suld_1d_i32_zero:
- case Intrinsic::nvvm_suld_1d_v2i32_zero:
- case Intrinsic::nvvm_suld_1d_v4i32_zero:
- case Intrinsic::nvvm_suld_1d_array_i32_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
- case Intrinsic::nvvm_suld_2d_i32_zero:
- case Intrinsic::nvvm_suld_2d_v2i32_zero:
- case Intrinsic::nvvm_suld_2d_v4i32_zero:
- case Intrinsic::nvvm_suld_2d_array_i32_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
- case Intrinsic::nvvm_suld_3d_i32_zero:
- case Intrinsic::nvvm_suld_3d_v2i32_zero:
- case Intrinsic::nvvm_suld_3d_v4i32_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i32;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
-
- case Intrinsic::nvvm_suld_1d_i64_clamp:
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
- case Intrinsic::nvvm_suld_2d_i64_clamp:
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
- case Intrinsic::nvvm_suld_3d_i64_clamp:
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:
- case Intrinsic::nvvm_suld_1d_i64_trap:
- case Intrinsic::nvvm_suld_1d_v2i64_trap:
- case Intrinsic::nvvm_suld_1d_array_i64_trap:
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
- case Intrinsic::nvvm_suld_2d_i64_trap:
- case Intrinsic::nvvm_suld_2d_v2i64_trap:
- case Intrinsic::nvvm_suld_2d_array_i64_trap:
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
- case Intrinsic::nvvm_suld_3d_i64_trap:
- case Intrinsic::nvvm_suld_3d_v2i64_trap:
- case Intrinsic::nvvm_suld_1d_i64_zero:
- case Intrinsic::nvvm_suld_1d_v2i64_zero:
- case Intrinsic::nvvm_suld_1d_array_i64_zero:
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
- case Intrinsic::nvvm_suld_2d_i64_zero:
- case Intrinsic::nvvm_suld_2d_v2i64_zero:
- case Intrinsic::nvvm_suld_2d_array_i64_zero:
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
- case Intrinsic::nvvm_suld_3d_i64_zero:
- case Intrinsic::nvvm_suld_3d_v2i64_zero:
- Info.opc = getOpcForSurfaceInstr(Intrinsic);
- Info.memVT = MVT::i64;
- Info.ptrVal = nullptr;
- Info.offset = 0;
- Info.vol = false;
- Info.readMem = true;
- Info.writeMem = false;
- Info.align = 16;
- return true;
- }
- return false;
-}
-
-/// isLegalAddressingMode - Return true if the addressing mode represented
-/// by AM is legal for this target, for a load/store of the specified type.
-/// Used to guide target specific optimizations, like loop strength reduction
-/// (LoopStrengthReduce.cpp) and memory optimization for address mode
-/// (CodeGenPrepare.cpp)
-bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
- const AddrMode &AM, Type *Ty,
- unsigned AS) const {
- // AddrMode - This represents an addressing mode of:
- // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
- //
- // The legal address modes are
- // - [avar]
- // - [areg]
- // - [areg+immoff]
- // - [immAddr]
-
- if (AM.BaseGV) {
- return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
- }
-
- switch (AM.Scale) {
- case 0: // "r", "r+i" or "i" is allowed
- break;
- case 1:
- if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
- return false;
- // Otherwise we have r+i.
- break;
- default:
- // No scale > 1 is allowed
- return false;
- }
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-NVPTXTargetLowering::ConstraintType
-NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default:
- break;
- case 'b':
- case 'r':
- case 'h':
- case 'c':
- case 'l':
- case 'f':
- case 'd':
- case '0':
- case 'N':
- return C_RegisterClass;
- }
- }
- return TargetLowering::getConstraintType(Constraint);
-}
-
-std::pair<unsigned, const TargetRegisterClass *>
-NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
- StringRef Constraint,
- MVT VT) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- case 'b':
- return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
- case 'c':
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
- case 'h':
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
- case 'r':
- return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
- case 'l':
- case 'N':
- return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
- case 'f':
- return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
- case 'd':
- return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
- }
- }
- return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX DAG Combining
-//===----------------------------------------------------------------------===//
-
-bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
- CodeGenOpt::Level OptLevel) const {
- // Always honor command-line argument
- if (FMAContractLevelOpt.getNumOccurrences() > 0)
- return FMAContractLevelOpt > 0;
-
- // Do not contract if we're not optimizing the code.
- if (OptLevel == 0)
- return false;
-
- // Honor TargetOptions flags that explicitly say fusion is okay.
- if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
- return true;
-
- return allowUnsafeFPMath(MF);
-}
-
-bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
- // Honor TargetOptions flags that explicitly say unsafe math is okay.
- if (MF.getTarget().Options.UnsafeFPMath)
- return true;
-
- // Allow unsafe math if unsafe-fp-math attribute explicitly says so.
- const Function *F = MF.getFunction();
- if (F->hasFnAttribute("unsafe-fp-math")) {
- Attribute Attr = F->getFnAttribute("unsafe-fp-math");
- StringRef Val = Attr.getValueAsString();
- if (Val == "true")
- return true;
- }
-
- return false;
-}
-
-/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
-/// operands N0 and N1. This is a helper for PerformADDCombine that is
-/// called with the default operands, and if that fails, with commuted
-/// operands.
-static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
- TargetLowering::DAGCombinerInfo &DCI,
- const NVPTXSubtarget &Subtarget,
- CodeGenOpt::Level OptLevel) {
- SelectionDAG &DAG = DCI.DAG;
- // Skip non-integer, non-scalar case
- EVT VT=N0.getValueType();
- if (VT.isVector())
- return SDValue();
-
- // fold (add (mul a, b), c) -> (mad a, b, c)
- //
- if (N0.getOpcode() == ISD::MUL) {
- assert (VT.isInteger());
- // For integer:
- // Since integer multiply-add costs the same as integer multiply
- // but is more costly than integer add, do the fusion only when
- // the mul is only used in the add.
- if (OptLevel==CodeGenOpt::None || VT != MVT::i32 ||
- !N0.getNode()->hasOneUse())
- return SDValue();
-
- // Do the folding
- return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1), N1);
- }
- else if (N0.getOpcode() == ISD::FMUL) {
- if (VT == MVT::f32 || VT == MVT::f64) {
- const auto *TLI = static_cast<const NVPTXTargetLowering *>(
- &DAG.getTargetLoweringInfo());
- if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
- return SDValue();
-
- // For floating point:
- // Do the fusion only when the mul has less than 5 uses and all
- // are add.
- // The heuristic is that if a use is not an add, then that use
- // cannot be fused into fma, therefore mul is still needed anyway.
- // If there are more than 4 uses, even if they are all add, fusing
- // them will increase register pressue.
- //
- int numUses = 0;
- int nonAddCount = 0;
- for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
- UE = N0.getNode()->use_end();
- UI != UE; ++UI) {
- numUses++;
- SDNode *User = *UI;
- if (User->getOpcode() != ISD::FADD)
- ++nonAddCount;
- }
- if (numUses >= 5)
- return SDValue();
- if (nonAddCount) {
- int orderNo = N->getIROrder();
- int orderNo2 = N0.getNode()->getIROrder();
- // simple heuristics here for considering potential register
- // pressure, the logics here is that the differnce are used
- // to measure the distance between def and use, the longer distance
- // more likely cause register pressure.
- if (orderNo - orderNo2 < 500)
- return SDValue();
-
- // Now, check if at least one of the FMUL's operands is live beyond the node N,
- // which guarantees that the FMA will not increase register pressure at node N.
- bool opIsLive = false;
- const SDNode *left = N0.getOperand(0).getNode();
- const SDNode *right = N0.getOperand(1).getNode();
-
- if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
- opIsLive = true;
-
- if (!opIsLive)
- for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- int orderNo3 = User->getIROrder();
- if (orderNo3 > orderNo) {
- opIsLive = true;
- break;
- }
- }
-
- if (!opIsLive)
- for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- int orderNo3 = User->getIROrder();
- if (orderNo3 > orderNo) {
- opIsLive = true;
- break;
- }
- }
-
- if (!opIsLive)
- return SDValue();
- }
-
- return DAG.getNode(ISD::FMA, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1), N1);
- }
- }
-
- return SDValue();
-}
-
-/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
-///
-static SDValue PerformADDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- const NVPTXSubtarget &Subtarget,
- CodeGenOpt::Level OptLevel) {
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
-
- // First try with the default operand order.
- if (SDValue Result =
- PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
- return Result;
-
- // If that didn't work, try again with the operands commuted.
- return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
-}
-
-static SDValue PerformANDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // The type legalizer turns a vector load of i8 values into a zextload to i16
- // registers, optionally ANY_EXTENDs it (if target type is integer),
- // and ANDs off the high 8 bits. Since we turn this load into a
- // target-specific DAG node, the DAG combiner fails to eliminate these AND
- // nodes. Do that here.
- SDValue Val = N->getOperand(0);
- SDValue Mask = N->getOperand(1);
-
- if (isa<ConstantSDNode>(Val)) {
- std::swap(Val, Mask);
- }
-
- SDValue AExt;
- // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
- if (Val.getOpcode() == ISD::ANY_EXTEND) {
- AExt = Val;
- Val = Val->getOperand(0);
- }
-
- if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
- Val = Val->getOperand(0);
- }
-
- if (Val->getOpcode() == NVPTXISD::LoadV2 ||
- Val->getOpcode() == NVPTXISD::LoadV4) {
- ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
- if (!MaskCnst) {
- // Not an AND with a constant
- return SDValue();
- }
-
- uint64_t MaskVal = MaskCnst->getZExtValue();
- if (MaskVal != 0xff) {
- // Not an AND that chops off top 8 bits
- return SDValue();
- }
-
- MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
- if (!Mem) {
- // Not a MemSDNode?!?
- return SDValue();
- }
-
- EVT MemVT = Mem->getMemoryVT();
- if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
- // We only handle the i8 case
- return SDValue();
- }
-
- unsigned ExtType =
- cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
- getZExtValue();
- if (ExtType == ISD::SEXTLOAD) {
- // If for some reason the load is a sextload, the and is needed to zero
- // out the high 8 bits
- return SDValue();
- }
-
- bool AddTo = false;
- if (AExt.getNode() != nullptr) {
- // Re-insert the ext as a zext.
- Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
- AExt.getValueType(), Val);
- AddTo = true;
- }
-
- // If we get here, the AND is unnecessary. Just replace it with the load
- DCI.CombineTo(N, Val, AddTo);
- }
-
- return SDValue();
-}
-
-static SDValue PerformREMCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
-
- // Don't do anything at less than -O2.
- if (OptLevel < CodeGenOpt::Default)
- return SDValue();
-
- SelectionDAG &DAG = DCI.DAG;
- SDLoc DL(N);
- EVT VT = N->getValueType(0);
- bool IsSigned = N->getOpcode() == ISD::SREM;
- unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
-
- const SDValue &Num = N->getOperand(0);
- const SDValue &Den = N->getOperand(1);
-
- for (const SDNode *U : Num->uses()) {
- if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
- U->getOperand(1) == Den) {
- // Num % Den -> Num - (Num / Den) * Den
- return DAG.getNode(ISD::SUB, DL, VT, Num,
- DAG.getNode(ISD::MUL, DL, VT,
- DAG.getNode(DivOpc, DL, VT, Num, Den),
- Den));
- }
- }
- return SDValue();
-}
-
-enum OperandSignedness {
- Signed = 0,
- Unsigned,
- Unknown
-};
-
-/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
-/// that can be demoted to \p OptSize bits without loss of information. The
-/// signedness of the operand, if determinable, is placed in \p S.
-static bool IsMulWideOperandDemotable(SDValue Op,
- unsigned OptSize,
- OperandSignedness &S) {
- S = Unknown;
-
- if (Op.getOpcode() == ISD::SIGN_EXTEND ||
- Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
- EVT OrigVT = Op.getOperand(0).getValueType();
- if (OrigVT.getSizeInBits() <= OptSize) {
- S = Signed;
- return true;
- }
- } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
- EVT OrigVT = Op.getOperand(0).getValueType();
- if (OrigVT.getSizeInBits() <= OptSize) {
- S = Unsigned;
- return true;
- }
- }
-
- return false;
-}
-
-/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
-/// be demoted to \p OptSize bits without loss of information. If the operands
-/// contain a constant, it should appear as the RHS operand. The signedness of
-/// the operands is placed in \p IsSigned.
-static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
- unsigned OptSize,
- bool &IsSigned) {
- OperandSignedness LHSSign;
-
- // The LHS operand must be a demotable op
- if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
- return false;
-
- // We should have been able to determine the signedness from the LHS
- if (LHSSign == Unknown)
- return false;
-
- IsSigned = (LHSSign == Signed);
-
- // The RHS can be a demotable op or a constant
- if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
- const APInt &Val = CI->getAPIntValue();
- if (LHSSign == Unsigned) {
- return Val.isIntN(OptSize);
- } else {
- return Val.isSignedIntN(OptSize);
- }
- } else {
- OperandSignedness RHSSign;
- if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
- return false;
-
- return LHSSign == RHSSign;
- }
-}
-
-/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
-/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
-/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
-/// amount.
-static SDValue TryMULWIDECombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT MulType = N->getValueType(0);
- if (MulType != MVT::i32 && MulType != MVT::i64) {
- return SDValue();
- }
-
- SDLoc DL(N);
- unsigned OptSize = MulType.getSizeInBits() >> 1;
- SDValue LHS = N->getOperand(0);
- SDValue RHS = N->getOperand(1);
-
- // Canonicalize the multiply so the constant (if any) is on the right
- if (N->getOpcode() == ISD::MUL) {
- if (isa<ConstantSDNode>(LHS)) {
- std::swap(LHS, RHS);
- }
- }
-
- // If we have a SHL, determine the actual multiply amount
- if (N->getOpcode() == ISD::SHL) {
- ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
- if (!ShlRHS) {
- return SDValue();
- }
-
- APInt ShiftAmt = ShlRHS->getAPIntValue();
- unsigned BitWidth = MulType.getSizeInBits();
- if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
- APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
- RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
- } else {
- return SDValue();
- }
- }
-
- bool Signed;
- // Verify that our operands are demotable
- if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
- return SDValue();
- }
-
- EVT DemotedVT;
- if (MulType == MVT::i32) {
- DemotedVT = MVT::i16;
- } else {
- DemotedVT = MVT::i32;
- }
-
- // Truncate the operands to the correct size. Note that these are just for
- // type consistency and will (likely) be eliminated in later phases.
- SDValue TruncLHS =
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
- SDValue TruncRHS =
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
-
- unsigned Opc;
- if (Signed) {
- Opc = NVPTXISD::MUL_WIDE_SIGNED;
- } else {
- Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
- }
-
- return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
-}
-
-/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
-static SDValue PerformMULCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- if (OptLevel > 0) {
- // Try mul.wide combining at OptLevel > 0
- if (SDValue Ret = TryMULWIDECombine(N, DCI))
- return Ret;
- }
-
- return SDValue();
-}
-
-/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
-static SDValue PerformSHLCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI,
- CodeGenOpt::Level OptLevel) {
- if (OptLevel > 0) {
- // Try mul.wide combining at OptLevel > 0
- if (SDValue Ret = TryMULWIDECombine(N, DCI))
- return Ret;
- }
-
- return SDValue();
-}
-
-static SDValue PerformSETCCCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- EVT CCType = N->getValueType(0);
- SDValue A = N->getOperand(0);
- SDValue B = N->getOperand(1);
-
- if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
- return SDValue();
-
- SDLoc DL(N);
- // setp.f16x2 returns two scalar predicates, which we need to
- // convert back to v2i1. The returned result will be scalarized by
- // the legalizer, but the comparison will remain a single vector
- // instruction.
- SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
- DCI.DAG.getVTList(MVT::i1, MVT::i1),
- {A, B, N->getOperand(2)});
- return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
- CCNode.getValue(1));
-}
-
-SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
- CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
- switch (N->getOpcode()) {
- default: break;
- case ISD::ADD:
- case ISD::FADD:
- return PerformADDCombine(N, DCI, STI, OptLevel);
- case ISD::MUL:
- return PerformMULCombine(N, DCI, OptLevel);
- case ISD::SHL:
- return PerformSHLCombine(N, DCI, OptLevel);
- case ISD::AND:
- return PerformANDCombine(N, DCI);
- case ISD::UREM:
- case ISD::SREM:
- return PerformREMCombine(N, DCI, OptLevel);
- case ISD::SETCC:
- return PerformSETCCCombine(N, DCI);
- }
- return SDValue();
-}
-
-/// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
-static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Results) {
- EVT ResVT = N->getValueType(0);
- SDLoc DL(N);
-
- assert(ResVT.isVector() && "Vector load must have vector type");
-
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not
- // legal. We can (and should) split that into 2 loads of <2 x double> here
- // but I'm leaving that as a TODO for now.
- assert(ResVT.isSimple() && "Can only handle simple types");
- switch (ResVT.getSimpleVT().SimpleTy) {
- default:
- return;
- case MVT::v2i8:
- case MVT::v2i16:
- case MVT::v2i32:
- case MVT::v2i64:
- case MVT::v2f16:
- case MVT::v2f32:
- case MVT::v2f64:
- case MVT::v4i8:
- case MVT::v4i16:
- case MVT::v4i32:
- case MVT::v4f16:
- case MVT::v4f32:
- case MVT::v8f16: // <4 x f16x2>
- // This is a "native" vector type
- break;
- }
-
- LoadSDNode *LD = cast<LoadSDNode>(N);
-
- unsigned Align = LD->getAlignment();
- auto &TD = DAG.getDataLayout();
- unsigned PrefAlign =
- TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
- if (Align < PrefAlign) {
- // This load is not sufficiently aligned, so bail out and let this vector
- // load be scalarized. Note that we may still be able to emit smaller
- // vector loads. For example, if we are loading a <4 x float> with an
- // alignment of 8, this check will fail but the legalizer will try again
- // with 2 x <2 x float>, which will succeed with an alignment of 8.
- return;
- }
-
- EVT EltVT = ResVT.getVectorElementType();
- unsigned NumElts = ResVT.getVectorNumElements();
-
- // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propagate the "real" type as the memory type.
- bool NeedTrunc = false;
- if (EltVT.getSizeInBits() < 16) {
- EltVT = MVT::i16;
- NeedTrunc = true;
- }
-
- unsigned Opcode = 0;
- SDVTList LdResVTs;
- bool LoadF16x2 = false;
-
- switch (NumElts) {
- default:
- return;
- case 2:
- Opcode = NVPTXISD::LoadV2;
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
- break;
- case 4: {
- Opcode = NVPTXISD::LoadV4;
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- case 8: {
- // v8f16 is a special case. PTX doesn't have ld.v8.f16
- // instruction. Instead, we split the vector into v2f16 chunks and
- // load them with ld.v4.b32.
- assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
- LoadF16x2 = true;
- Opcode = NVPTXISD::LoadV4;
- EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
- MVT::Other};
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- }
-
- // Copy regular operands
- SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
-
- // The select routine does not have access to the LoadSDNode instance, so
- // pass along the extension information
- OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
-
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
- LD->getMemoryVT(),
- LD->getMemOperand());
-
- SmallVector<SDValue, 8> ScalarRes;
- if (LoadF16x2) {
- // Split v2f16 subvectors back into individual elements.
- NumElts /= 2;
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue SubVector = NewLD.getValue(i);
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
- DAG.getIntPtrConstant(0, DL));
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
- DAG.getIntPtrConstant(1, DL));
- ScalarRes.push_back(E0);
- ScalarRes.push_back(E1);
- }
- } else {
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Res = NewLD.getValue(i);
- if (NeedTrunc)
- Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
- ScalarRes.push_back(Res);
- }
- }
-
- SDValue LoadChain = NewLD.getValue(NumElts);
-
- SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
-
- Results.push_back(BuildVec);
- Results.push_back(LoadChain);
-}
-
-static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &Results) {
- SDValue Chain = N->getOperand(0);
- SDValue Intrin = N->getOperand(1);
- SDLoc DL(N);
-
- // Get the intrinsic ID
- unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p: {
- EVT ResVT = N->getValueType(0);
-
- if (ResVT.isVector()) {
- // Vector LDG/LDU
-
- unsigned NumElts = ResVT.getVectorNumElements();
- EVT EltVT = ResVT.getVectorElementType();
-
- // Since LDU/LDG are target nodes, we cannot rely on DAG type
- // legalization.
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propagate the "real" type as the memory type.
- bool NeedTrunc = false;
- if (EltVT.getSizeInBits() < 16) {
- EltVT = MVT::i16;
- NeedTrunc = true;
- }
-
- unsigned Opcode = 0;
- SDVTList LdResVTs;
-
- switch (NumElts) {
- default:
- return;
- case 2:
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- Opcode = NVPTXISD::LDGV2;
- break;
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p:
- Opcode = NVPTXISD::LDUV2;
- break;
- }
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
- break;
- case 4: {
- switch (IntrinNo) {
- default:
- return;
- case Intrinsic::nvvm_ldg_global_i:
- case Intrinsic::nvvm_ldg_global_f:
- case Intrinsic::nvvm_ldg_global_p:
- Opcode = NVPTXISD::LDGV4;
- break;
- case Intrinsic::nvvm_ldu_global_i:
- case Intrinsic::nvvm_ldu_global_f:
- case Intrinsic::nvvm_ldu_global_p:
- Opcode = NVPTXISD::LDUV4;
- break;
- }
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
- LdResVTs = DAG.getVTList(ListVTs);
- break;
- }
- }
-
- SmallVector<SDValue, 8> OtherOps;
-
- // Copy regular operands
-
- OtherOps.push_back(Chain); // Chain
- // Skip operand 1 (intrinsic ID)
- // Others
- OtherOps.append(N->op_begin() + 2, N->op_end());
-
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
-
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
- MemSD->getMemoryVT(),
- MemSD->getMemOperand());
-
- SmallVector<SDValue, 4> ScalarRes;
-
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Res = NewLD.getValue(i);
- if (NeedTrunc)
- Res =
- DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
- ScalarRes.push_back(Res);
- }
-
- SDValue LoadChain = NewLD.getValue(NumElts);
-
- SDValue BuildVec =
- DAG.getBuildVector(ResVT, DL, ScalarRes);
-
- Results.push_back(BuildVec);
- Results.push_back(LoadChain);
- } else {
- // i8 LDG/LDU
- assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
- "Custom handling of non-i8 ldu/ldg?");
-
- // Just copy all operands as-is
- SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
-
- // Force output to i16
- SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
-
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
-
- // We make sure the memory type is i8, which will be used during isel
- // to select the proper instruction.
- SDValue NewLD =
- DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
- MVT::i8, MemSD->getMemOperand());
-
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
- NewLD.getValue(0)));
- Results.push_back(NewLD.getValue(1));
- }
- }
- }
-}
-
-void NVPTXTargetLowering::ReplaceNodeResults(
- SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
- switch (N->getOpcode()) {
- default:
- report_fatal_error("Unhandled custom legalization");
- case ISD::LOAD:
- ReplaceLoadVector(N, DAG, Results);
- return;
- case ISD::INTRINSIC_W_CHAIN:
- ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
- return;
- }
-}
-
-// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
-void NVPTXSection::anchor() {}
-
-NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
- delete static_cast<NVPTXSection *>(TextSection);
- delete static_cast<NVPTXSection *>(DataSection);
- delete static_cast<NVPTXSection *>(BSSSection);
- delete static_cast<NVPTXSection *>(ReadOnlySection);
-
- delete static_cast<NVPTXSection *>(StaticCtorSection);
- delete static_cast<NVPTXSection *>(StaticDtorSection);
- delete static_cast<NVPTXSection *>(LSDASection);
- delete static_cast<NVPTXSection *>(EHFrameSection);
- delete static_cast<NVPTXSection *>(DwarfAbbrevSection);
- delete static_cast<NVPTXSection *>(DwarfInfoSection);
- delete static_cast<NVPTXSection *>(DwarfLineSection);
- delete static_cast<NVPTXSection *>(DwarfFrameSection);
- delete static_cast<NVPTXSection *>(DwarfPubTypesSection);
- delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);
- delete static_cast<NVPTXSection *>(DwarfStrSection);
- delete static_cast<NVPTXSection *>(DwarfLocSection);
- delete static_cast<NVPTXSection *>(DwarfARangesSection);
- delete static_cast<NVPTXSection *>(DwarfRangesSection);
- delete static_cast<NVPTXSection *>(DwarfMacinfoSection);
-}
-
-MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
- const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
- return getDataSection();
-}
+//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that NVPTX uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "NVPTX.h"
+#include "NVPTXISelLowering.h"
+#include "NVPTXSection.h"
+#include "NVPTXSubtarget.h"
+#include "NVPTXTargetMachine.h"
+#include "NVPTXTargetObjectFile.h"
+#include "NVPTXUtilities.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetCallingConv.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#undef DEBUG_TYPE
+#define DEBUG_TYPE "nvptx-lower"
+
+using namespace llvm;
+
+static unsigned int uniqueCallSite = 0;
+
+static cl::opt<bool> sched4reg(
+ "nvptx-sched4reg",
+ cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
+
+static cl::opt<unsigned>
+FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
+ " 1: do it 2: do it aggressively"),
+ cl::init(2));
+
+static cl::opt<int> UsePrecDivF32(
+ "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
+ " IEEE Compliant F32 div.rnd if available."),
+ cl::init(2));
+
+static cl::opt<bool> UsePrecSqrtF32(
+ "nvptx-prec-sqrtf32", cl::Hidden,
+ cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
+ cl::init(true));
+
+static cl::opt<bool> FtzEnabled(
+ "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
+ cl::init(false));
+
+int NVPTXTargetLowering::getDivF32Level() const {
+ if (UsePrecDivF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-div32=N is used on the command-line, always honor it
+ return UsePrecDivF32;
+ } else {
+ // Otherwise, use div.approx if fast math is enabled
+ if (getTargetMachine().Options.UnsafeFPMath)
+ return 0;
+ else
+ return 2;
+ }
+}
+
+bool NVPTXTargetLowering::usePrecSqrtF32() const {
+ if (UsePrecSqrtF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
+ return UsePrecSqrtF32;
+ } else {
+ // Otherwise, use sqrt.approx if fast math is enabled
+ return !getTargetMachine().Options.UnsafeFPMath;
+ }
+}
+
+bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
+ // TODO: Get rid of this flag; there can be only one way to do this.
+ if (FtzEnabled.getNumOccurrences() > 0) {
+ // If nvptx-f32ftz is used on the command-line, always honor it
+ return FtzEnabled;
+ } else {
+ const Function *F = MF.getFunction();
+ // Otherwise, check for an nvptx-f32ftz attribute on the function
+ if (F->hasFnAttribute("nvptx-f32ftz"))
+ return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
+ else
+ return false;
+ }
+}
+
+static bool IsPTXVectorType(MVT VT) {
+ switch (VT.SimpleTy) {
+ default:
+ return false;
+ case MVT::v2i1:
+ case MVT::v4i1:
+ case MVT::v2i8:
+ case MVT::v4i8:
+ case MVT::v2i16:
+ case MVT::v4i16:
+ case MVT::v2i32:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v4f16:
+ case MVT::v8f16: // <4 x f16x2>
+ case MVT::v2f32:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ return true;
+ }
+}
+
+/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
+/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
+/// into their primitive components.
+/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
+/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
+/// LowerCall, and LowerReturn.
+static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+ Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
+ uint64_t StartingOffset = 0) {
+ SmallVector<EVT, 16> TempVTs;
+ SmallVector<uint64_t, 16> TempOffsets;
+
+ ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
+ for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
+ EVT VT = TempVTs[i];
+ uint64_t Off = TempOffsets[i];
+ // Split vectors into individual elements, except for v2f16, which
+ // we will pass as a single scalar.
+ if (VT.isVector()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ EVT EltVT = VT.getVectorElementType();
+ // Vectors with an even number of f16 elements will be passed to
+ // us as an array of v2f16 elements. We must match this so we
+ // stay in sync with Ins/Outs.
+ if (EltVT == MVT::f16 && NumElts % 2 == 0) {
+ EltVT = MVT::v2f16;
+ NumElts /= 2;
+ }
+ for (unsigned j = 0; j != NumElts; ++j) {
+ ValueVTs.push_back(EltVT);
+ if (Offsets)
+ Offsets->push_back(Off + j * EltVT.getStoreSize());
+ }
+ } else {
+ ValueVTs.push_back(VT);
+ if (Offsets)
+ Offsets->push_back(Off);
+ }
+ }
+}
+
+// Check whether we can merge loads/stores of some of the pieces of a
+// flattened function parameter or return value into a single vector
+// load/store.
+//
+// The flattened parameter is represented as a list of EVTs and
+// offsets, and the whole structure is aligned to ParamAlignment. This
+// function determines whether we can load/store pieces of the
+// parameter starting at index Idx using a single vectorized op of
+// size AccessSize. If so, it returns the number of param pieces
+// covered by the vector op. Otherwise, it returns 1.
+static unsigned CanMergeParamLoadStoresStartingAt(
+ unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
+ assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
+
+ // Can't vectorize if param alignment is not sufficient.
+ if (AccessSize > ParamAlignment)
+ return 1;
+ // Can't vectorize if offset is not aligned.
+ if (Offsets[Idx] & (AccessSize - 1))
+ return 1;
+
+ EVT EltVT = ValueVTs[Idx];
+ unsigned EltSize = EltVT.getStoreSize();
+
+ // Element is too large to vectorize.
+ if (EltSize >= AccessSize)
+ return 1;
+
+ unsigned NumElts = AccessSize / EltSize;
+ // Can't vectorize if AccessBytes if not a multiple of EltSize.
+ if (AccessSize != EltSize * NumElts)
+ return 1;
+
+ // We don't have enough elements to vectorize.
+ if (Idx + NumElts > ValueVTs.size())
+ return 1;
+
+ // PTX ISA can only deal with 2- and 4-element vector ops.
+ if (NumElts != 4 && NumElts != 2)
+ return 1;
+
+ for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
+ // Types do not match.
+ if (ValueVTs[j] != EltVT)
+ return 1;
+
+ // Elements are not contiguous.
+ if (Offsets[j] - Offsets[j - 1] != EltSize)
+ return 1;
+ }
+ // OK. We can vectorize ValueVTs[i..i+NumElts)
+ return NumElts;
+}
+
+// Flags for tracking per-element vectorization state of loads/stores
+// of a flattened function parameter or return value.
+enum ParamVectorizationFlags {
+ PVF_INNER = 0x0, // Middle elements of a vector.
+ PVF_FIRST = 0x1, // First element of the vector.
+ PVF_LAST = 0x2, // Last element of the vector.
+ // Scalar is effectively a 1-element vector.
+ PVF_SCALAR = PVF_FIRST | PVF_LAST
+};
+
+// Computes whether and how we can vectorize the loads/stores of a
+// flattened function parameter or return value.
+//
+// The flattened parameter is represented as the list of ValueVTs and
+// Offsets, and is aligned to ParamAlignment bytes. We return a vector
+// of the same size as ValueVTs indicating how each piece should be
+// loaded/stored (i.e. as a scalar, or as part of a vector
+// load/store).
+static SmallVector<ParamVectorizationFlags, 16>
+VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets,
+ unsigned ParamAlignment) {
+ // Set vector size to match ValueVTs and mark all elements as
+ // scalars by default.
+ SmallVector<ParamVectorizationFlags, 16> VectorInfo;
+ VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);
+
+ // Check what we can vectorize using 128/64/32-bit accesses.
+ for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
+ // Skip elements we've already processed.
+ assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
+ for (unsigned AccessSize : {16, 8, 4, 2}) {
+ unsigned NumElts = CanMergeParamLoadStoresStartingAt(
+ I, AccessSize, ValueVTs, Offsets, ParamAlignment);
+ // Mark vectorized elements.
+ switch (NumElts) {
+ default:
+ llvm_unreachable("Unexpected return value");
+ case 1:
+ // Can't vectorize using this size, try next smaller size.
+ continue;
+ case 2:
+ assert(I + 1 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_LAST;
+ I += 1;
+ break;
+ case 4:
+ assert(I + 3 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_INNER;
+ VectorInfo[I + 2] = PVF_INNER;
+ VectorInfo[I + 3] = PVF_LAST;
+ I += 3;
+ break;
+ }
+ // Break out of the inner loop because we've already succeeded
+ // using largest possible AccessSize.
+ break;
+ }
+ }
+ return VectorInfo;
+}
+
+// NVPTXTargetLowering Constructor.
+NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+ const NVPTXSubtarget &STI)
+ : TargetLowering(TM), nvTM(&TM), STI(STI) {
+ // always lower memset, memcpy, and memmove intrinsics to load/store
+ // instructions, rather
+ // then generating calls to memset, mempcy or memmove.
+ MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
+
+ setBooleanContents(ZeroOrNegativeOneBooleanContent);
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+
+ // Jump is Expensive. Don't create extra control flow for 'and', 'or'
+ // condition branches.
+ setJumpIsExpensive(true);
+
+ // Wide divides are _very_ slow. Try to reduce the width of the divide if
+ // possible.
+ addBypassSlowDiv(64, 32);
+
+ // By default, use the Source scheduling
+ if (sched4reg)
+ setSchedulingPreference(Sched::RegPressure);
+ else
+ setSchedulingPreference(Sched::Source);
+
+ auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
+ LegalizeAction NoF16Action) {
+ setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
+ };
+
+ addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
+ addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
+ addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
+ addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
+ addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
+ addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
+ addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
+ addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);
+
+ // Conversion to/from FP16/FP16x2 is always legal.
+ setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
+
+ setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
+ setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
+
+ // Operations not directly supported by NVPTX.
+ setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i8, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+ // Some SIGN_EXTEND_INREG can be done using cvt instruction.
+ // For others we will expand to a SHL/SRA pair.
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);
+
+ setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+ setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
+
+ if (STI.hasROT64()) {
+ setOperationAction(ISD::ROTL, MVT::i64, Legal);
+ setOperationAction(ISD::ROTR, MVT::i64, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+ }
+ if (STI.hasROT32()) {
+ setOperationAction(ISD::ROTL, MVT::i32, Legal);
+ setOperationAction(ISD::ROTR, MVT::i32, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ }
+
+ setOperationAction(ISD::ROTL, MVT::i16, Expand);
+ setOperationAction(ISD::ROTR, MVT::i16, Expand);
+ setOperationAction(ISD::ROTL, MVT::i8, Expand);
+ setOperationAction(ISD::ROTR, MVT::i8, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i16, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+ // Indirect branch is not supported.
+ // This also disables Jump Table creation.
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+
+ // We want to legalize constant related memmove and memcopy
+ // intrinsics.
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+
+ // Turn FP extload into load/fpextend
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
+ // Turn FP truncstore into trunc + store.
+ // FIXME: vector types should also be expanded
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // PTX does not support load / store predicate registers
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+ setOperationAction(ISD::STORE, MVT::i1, Custom);
+
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setTruncStoreAction(VT, MVT::i1, Expand);
+ }
+
+ // This is legal in NVPTX
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
+
+ // TRAP can be lowered to PTX trap
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+
+ // Register custom handling for vector loads/stores
+ for (MVT VT : MVT::vector_valuetypes()) {
+ if (IsPTXVectorType(VT)) {
+ setOperationAction(ISD::LOAD, VT, Custom);
+ setOperationAction(ISD::STORE, VT, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
+ }
+ }
+
+ // Custom handling for i8 intrinsics
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
+
+ for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
+ setOperationAction(ISD::ABS, Ty, Legal);
+ setOperationAction(ISD::SMIN, Ty, Legal);
+ setOperationAction(ISD::SMAX, Ty, Legal);
+ setOperationAction(ISD::UMIN, Ty, Legal);
+ setOperationAction(ISD::UMAX, Ty, Legal);
+
+ setOperationAction(ISD::CTPOP, Ty, Legal);
+ setOperationAction(ISD::CTLZ, Ty, Legal);
+ }
+
+ setOperationAction(ISD::CTTZ, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+
+ // PTX does not directly support SELP of i1, so promote to i32 first
+ setOperationAction(ISD::SELECT, MVT::i1, Custom);
+
+ // PTX cannot multiply two i64s in a single instruction.
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
+ // We have some custom DAG combine patterns for these nodes
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::FADD);
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine(ISD::SREM);
+ setTargetDAGCombine(ISD::UREM);
+
+ // setcc for f16x2 needs special handling to prevent legalizer's
+ // attempt to scalarize it due to v2i1 not being legal.
+ if (STI.allowFP16Math())
+ setTargetDAGCombine(ISD::SETCC);
+
+ // Promote fp16 arithmetic if fp16 hardware isn't available or the
+ // user passed --nvptx-no-fp16-math. The flag is useful because,
+ // although sm_53+ GPUs have some sort of FP16 support in
+ // hardware, only sm_53 and sm_60 have full implementation. Others
+ // only have token amount of hardware and are likely to run faster
+ // by using fp32 units instead.
+ for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
+ setFP16OperationAction(Op, MVT::f16, Legal, Promote);
+ setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
+ }
+
+ // There's no neg.f16 instruction. Expand to (0-x).
+ setOperationAction(ISD::FNEG, MVT::f16, Expand);
+ setOperationAction(ISD::FNEG, MVT::v2f16, Expand);
+
+ // (would be) Library functions.
+
+ // These map to conversion instructions for scalar FP types.
+ for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
+ ISD::FROUND, ISD::FTRUNC}) {
+ setOperationAction(Op, MVT::f16, Legal);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+
+ // 'Expand' implements FCOPYSIGN without calling an external library.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+
+ // These map to corresponding instructions for f32/f64. f16 must be
+ // promoted to f32. v2f16 is expanded to f16, which is then promoted
+ // to f32.
+ for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
+ ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
+ setOperationAction(Op, MVT::f16, Promote);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+ setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);
+
+ // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
+ // No FPOW or FREM in PTX.
+
+ // Now deduce the information based on the above mentioned
+ // actions
+ computeRegisterProperties(STI.getRegisterInfo());
+}
+
+const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch ((NVPTXISD::NodeType)Opcode) {
+ case NVPTXISD::FIRST_NUMBER:
+ break;
+ case NVPTXISD::CALL:
+ return "NVPTXISD::CALL";
+ case NVPTXISD::RET_FLAG:
+ return "NVPTXISD::RET_FLAG";
+ case NVPTXISD::LOAD_PARAM:
+ return "NVPTXISD::LOAD_PARAM";
+ case NVPTXISD::Wrapper:
+ return "NVPTXISD::Wrapper";
+ case NVPTXISD::DeclareParam:
+ return "NVPTXISD::DeclareParam";
+ case NVPTXISD::DeclareScalarParam:
+ return "NVPTXISD::DeclareScalarParam";
+ case NVPTXISD::DeclareRet:
+ return "NVPTXISD::DeclareRet";
+ case NVPTXISD::DeclareScalarRet:
+ return "NVPTXISD::DeclareScalarRet";
+ case NVPTXISD::DeclareRetParam:
+ return "NVPTXISD::DeclareRetParam";
+ case NVPTXISD::PrintCall:
+ return "NVPTXISD::PrintCall";
+ case NVPTXISD::PrintConvergentCall:
+ return "NVPTXISD::PrintConvergentCall";
+ case NVPTXISD::PrintCallUni:
+ return "NVPTXISD::PrintCallUni";
+ case NVPTXISD::PrintConvergentCallUni:
+ return "NVPTXISD::PrintConvergentCallUni";
+ case NVPTXISD::LoadParam:
+ return "NVPTXISD::LoadParam";
+ case NVPTXISD::LoadParamV2:
+ return "NVPTXISD::LoadParamV2";
+ case NVPTXISD::LoadParamV4:
+ return "NVPTXISD::LoadParamV4";
+ case NVPTXISD::StoreParam:
+ return "NVPTXISD::StoreParam";
+ case NVPTXISD::StoreParamV2:
+ return "NVPTXISD::StoreParamV2";
+ case NVPTXISD::StoreParamV4:
+ return "NVPTXISD::StoreParamV4";
+ case NVPTXISD::StoreParamS32:
+ return "NVPTXISD::StoreParamS32";
+ case NVPTXISD::StoreParamU32:
+ return "NVPTXISD::StoreParamU32";
+ case NVPTXISD::CallArgBegin:
+ return "NVPTXISD::CallArgBegin";
+ case NVPTXISD::CallArg:
+ return "NVPTXISD::CallArg";
+ case NVPTXISD::LastCallArg:
+ return "NVPTXISD::LastCallArg";
+ case NVPTXISD::CallArgEnd:
+ return "NVPTXISD::CallArgEnd";
+ case NVPTXISD::CallVoid:
+ return "NVPTXISD::CallVoid";
+ case NVPTXISD::CallVal:
+ return "NVPTXISD::CallVal";
+ case NVPTXISD::CallSymbol:
+ return "NVPTXISD::CallSymbol";
+ case NVPTXISD::Prototype:
+ return "NVPTXISD::Prototype";
+ case NVPTXISD::MoveParam:
+ return "NVPTXISD::MoveParam";
+ case NVPTXISD::StoreRetval:
+ return "NVPTXISD::StoreRetval";
+ case NVPTXISD::StoreRetvalV2:
+ return "NVPTXISD::StoreRetvalV2";
+ case NVPTXISD::StoreRetvalV4:
+ return "NVPTXISD::StoreRetvalV4";
+ case NVPTXISD::PseudoUseParam:
+ return "NVPTXISD::PseudoUseParam";
+ case NVPTXISD::RETURN:
+ return "NVPTXISD::RETURN";
+ case NVPTXISD::CallSeqBegin:
+ return "NVPTXISD::CallSeqBegin";
+ case NVPTXISD::CallSeqEnd:
+ return "NVPTXISD::CallSeqEnd";
+ case NVPTXISD::CallPrototype:
+ return "NVPTXISD::CallPrototype";
+ case NVPTXISD::LoadV2:
+ return "NVPTXISD::LoadV2";
+ case NVPTXISD::LoadV4:
+ return "NVPTXISD::LoadV4";
+ case NVPTXISD::LDGV2:
+ return "NVPTXISD::LDGV2";
+ case NVPTXISD::LDGV4:
+ return "NVPTXISD::LDGV4";
+ case NVPTXISD::LDUV2:
+ return "NVPTXISD::LDUV2";
+ case NVPTXISD::LDUV4:
+ return "NVPTXISD::LDUV4";
+ case NVPTXISD::StoreV2:
+ return "NVPTXISD::StoreV2";
+ case NVPTXISD::StoreV4:
+ return "NVPTXISD::StoreV4";
+ case NVPTXISD::FUN_SHFL_CLAMP:
+ return "NVPTXISD::FUN_SHFL_CLAMP";
+ case NVPTXISD::FUN_SHFR_CLAMP:
+ return "NVPTXISD::FUN_SHFR_CLAMP";
+ case NVPTXISD::IMAD:
+ return "NVPTXISD::IMAD";
+ case NVPTXISD::SETP_F16X2:
+ return "NVPTXISD::SETP_F16X2";
+ case NVPTXISD::Dummy:
+ return "NVPTXISD::Dummy";
+ case NVPTXISD::MUL_WIDE_SIGNED:
+ return "NVPTXISD::MUL_WIDE_SIGNED";
+ case NVPTXISD::MUL_WIDE_UNSIGNED:
+ return "NVPTXISD::MUL_WIDE_UNSIGNED";
+ case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
+ case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
+ case NVPTXISD::Tex1DFloatFloatLevel:
+ return "NVPTXISD::Tex1DFloatFloatLevel";
+ case NVPTXISD::Tex1DFloatFloatGrad:
+ return "NVPTXISD::Tex1DFloatFloatGrad";
+ case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
+ case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
+ case NVPTXISD::Tex1DS32FloatLevel:
+ return "NVPTXISD::Tex1DS32FloatLevel";
+ case NVPTXISD::Tex1DS32FloatGrad:
+ return "NVPTXISD::Tex1DS32FloatGrad";
+ case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
+ case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
+ case NVPTXISD::Tex1DU32FloatLevel:
+ return "NVPTXISD::Tex1DU32FloatLevel";
+ case NVPTXISD::Tex1DU32FloatGrad:
+ return "NVPTXISD::Tex1DU32FloatGrad";
+ case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
+ case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
+ case NVPTXISD::Tex1DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex1DArrayFloatFloatLevel";
+ case NVPTXISD::Tex1DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex1DArrayFloatFloatGrad";
+ case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
+ case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
+ case NVPTXISD::Tex1DArrayS32FloatLevel:
+ return "NVPTXISD::Tex1DArrayS32FloatLevel";
+ case NVPTXISD::Tex1DArrayS32FloatGrad:
+ return "NVPTXISD::Tex1DArrayS32FloatGrad";
+ case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
+ case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
+ case NVPTXISD::Tex1DArrayU32FloatLevel:
+ return "NVPTXISD::Tex1DArrayU32FloatLevel";
+ case NVPTXISD::Tex1DArrayU32FloatGrad:
+ return "NVPTXISD::Tex1DArrayU32FloatGrad";
+ case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
+ case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
+ case NVPTXISD::Tex2DFloatFloatLevel:
+ return "NVPTXISD::Tex2DFloatFloatLevel";
+ case NVPTXISD::Tex2DFloatFloatGrad:
+ return "NVPTXISD::Tex2DFloatFloatGrad";
+ case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
+ case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
+ case NVPTXISD::Tex2DS32FloatLevel:
+ return "NVPTXISD::Tex2DS32FloatLevel";
+ case NVPTXISD::Tex2DS32FloatGrad:
+ return "NVPTXISD::Tex2DS32FloatGrad";
+ case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
+ case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
+ case NVPTXISD::Tex2DU32FloatLevel:
+ return "NVPTXISD::Tex2DU32FloatLevel";
+ case NVPTXISD::Tex2DU32FloatGrad:
+ return "NVPTXISD::Tex2DU32FloatGrad";
+ case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
+ case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
+ case NVPTXISD::Tex2DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex2DArrayFloatFloatLevel";
+ case NVPTXISD::Tex2DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex2DArrayFloatFloatGrad";
+ case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
+ case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
+ case NVPTXISD::Tex2DArrayS32FloatLevel:
+ return "NVPTXISD::Tex2DArrayS32FloatLevel";
+ case NVPTXISD::Tex2DArrayS32FloatGrad:
+ return "NVPTXISD::Tex2DArrayS32FloatGrad";
+ case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
+ case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
+ case NVPTXISD::Tex2DArrayU32FloatLevel:
+ return "NVPTXISD::Tex2DArrayU32FloatLevel";
+ case NVPTXISD::Tex2DArrayU32FloatGrad:
+ return "NVPTXISD::Tex2DArrayU32FloatGrad";
+ case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
+ case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
+ case NVPTXISD::Tex3DFloatFloatLevel:
+ return "NVPTXISD::Tex3DFloatFloatLevel";
+ case NVPTXISD::Tex3DFloatFloatGrad:
+ return "NVPTXISD::Tex3DFloatFloatGrad";
+ case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
+ case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
+ case NVPTXISD::Tex3DS32FloatLevel:
+ return "NVPTXISD::Tex3DS32FloatLevel";
+ case NVPTXISD::Tex3DS32FloatGrad:
+ return "NVPTXISD::Tex3DS32FloatGrad";
+ case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
+ case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
+ case NVPTXISD::Tex3DU32FloatLevel:
+ return "NVPTXISD::Tex3DU32FloatLevel";
+ case NVPTXISD::Tex3DU32FloatGrad:
+ return "NVPTXISD::Tex3DU32FloatGrad";
+ case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
+ case NVPTXISD::TexCubeFloatFloatLevel:
+ return "NVPTXISD::TexCubeFloatFloatLevel";
+ case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
+ case NVPTXISD::TexCubeS32FloatLevel:
+ return "NVPTXISD::TexCubeS32FloatLevel";
+ case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
+ case NVPTXISD::TexCubeU32FloatLevel:
+ return "NVPTXISD::TexCubeU32FloatLevel";
+ case NVPTXISD::TexCubeArrayFloatFloat:
+ return "NVPTXISD::TexCubeArrayFloatFloat";
+ case NVPTXISD::TexCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexCubeArrayS32Float:
+ return "NVPTXISD::TexCubeArrayS32Float";
+ case NVPTXISD::TexCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexCubeArrayS32FloatLevel";
+ case NVPTXISD::TexCubeArrayU32Float:
+ return "NVPTXISD::TexCubeArrayU32Float";
+ case NVPTXISD::TexCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4R2DFloatFloat:
+ return "NVPTXISD::Tld4R2DFloatFloat";
+ case NVPTXISD::Tld4G2DFloatFloat:
+ return "NVPTXISD::Tld4G2DFloatFloat";
+ case NVPTXISD::Tld4B2DFloatFloat:
+ return "NVPTXISD::Tld4B2DFloatFloat";
+ case NVPTXISD::Tld4A2DFloatFloat:
+ return "NVPTXISD::Tld4A2DFloatFloat";
+ case NVPTXISD::Tld4R2DS64Float:
+ return "NVPTXISD::Tld4R2DS64Float";
+ case NVPTXISD::Tld4G2DS64Float:
+ return "NVPTXISD::Tld4G2DS64Float";
+ case NVPTXISD::Tld4B2DS64Float:
+ return "NVPTXISD::Tld4B2DS64Float";
+ case NVPTXISD::Tld4A2DS64Float:
+ return "NVPTXISD::Tld4A2DS64Float";
+ case NVPTXISD::Tld4R2DU64Float:
+ return "NVPTXISD::Tld4R2DU64Float";
+ case NVPTXISD::Tld4G2DU64Float:
+ return "NVPTXISD::Tld4G2DU64Float";
+ case NVPTXISD::Tld4B2DU64Float:
+ return "NVPTXISD::Tld4B2DU64Float";
+ case NVPTXISD::Tld4A2DU64Float:
+ return "NVPTXISD::Tld4A2DU64Float";
+
+ case NVPTXISD::TexUnified1DFloatS32:
+ return "NVPTXISD::TexUnified1DFloatS32";
+ case NVPTXISD::TexUnified1DFloatFloat:
+ return "NVPTXISD::TexUnified1DFloatFloat";
+ case NVPTXISD::TexUnified1DFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DFloatFloatLevel";
+ case NVPTXISD::TexUnified1DFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DFloatFloatGrad";
+ case NVPTXISD::TexUnified1DS32S32:
+ return "NVPTXISD::TexUnified1DS32S32";
+ case NVPTXISD::TexUnified1DS32Float:
+ return "NVPTXISD::TexUnified1DS32Float";
+ case NVPTXISD::TexUnified1DS32FloatLevel:
+ return "NVPTXISD::TexUnified1DS32FloatLevel";
+ case NVPTXISD::TexUnified1DS32FloatGrad:
+ return "NVPTXISD::TexUnified1DS32FloatGrad";
+ case NVPTXISD::TexUnified1DU32S32:
+ return "NVPTXISD::TexUnified1DU32S32";
+ case NVPTXISD::TexUnified1DU32Float:
+ return "NVPTXISD::TexUnified1DU32Float";
+ case NVPTXISD::TexUnified1DU32FloatLevel:
+ return "NVPTXISD::TexUnified1DU32FloatLevel";
+ case NVPTXISD::TexUnified1DU32FloatGrad:
+ return "NVPTXISD::TexUnified1DU32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayFloatS32:
+ return "NVPTXISD::TexUnified1DArrayFloatS32";
+ case NVPTXISD::TexUnified1DArrayFloatFloat:
+ return "NVPTXISD::TexUnified1DArrayFloatFloat";
+ case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified1DArrayS32S32:
+ return "NVPTXISD::TexUnified1DArrayS32S32";
+ case NVPTXISD::TexUnified1DArrayS32Float:
+ return "NVPTXISD::TexUnified1DArrayS32Float";
+ case NVPTXISD::TexUnified1DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayU32S32:
+ return "NVPTXISD::TexUnified1DArrayU32S32";
+ case NVPTXISD::TexUnified1DArrayU32Float:
+ return "NVPTXISD::TexUnified1DArrayU32Float";
+ case NVPTXISD::TexUnified1DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified2DFloatS32:
+ return "NVPTXISD::TexUnified2DFloatS32";
+ case NVPTXISD::TexUnified2DFloatFloat:
+ return "NVPTXISD::TexUnified2DFloatFloat";
+ case NVPTXISD::TexUnified2DFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DFloatFloatLevel";
+ case NVPTXISD::TexUnified2DFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DFloatFloatGrad";
+ case NVPTXISD::TexUnified2DS32S32:
+ return "NVPTXISD::TexUnified2DS32S32";
+ case NVPTXISD::TexUnified2DS32Float:
+ return "NVPTXISD::TexUnified2DS32Float";
+ case NVPTXISD::TexUnified2DS32FloatLevel:
+ return "NVPTXISD::TexUnified2DS32FloatLevel";
+ case NVPTXISD::TexUnified2DS32FloatGrad:
+ return "NVPTXISD::TexUnified2DS32FloatGrad";
+ case NVPTXISD::TexUnified2DU32S32:
+ return "NVPTXISD::TexUnified2DU32S32";
+ case NVPTXISD::TexUnified2DU32Float:
+ return "NVPTXISD::TexUnified2DU32Float";
+ case NVPTXISD::TexUnified2DU32FloatLevel:
+ return "NVPTXISD::TexUnified2DU32FloatLevel";
+ case NVPTXISD::TexUnified2DU32FloatGrad:
+ return "NVPTXISD::TexUnified2DU32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayFloatS32:
+ return "NVPTXISD::TexUnified2DArrayFloatS32";
+ case NVPTXISD::TexUnified2DArrayFloatFloat:
+ return "NVPTXISD::TexUnified2DArrayFloatFloat";
+ case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified2DArrayS32S32:
+ return "NVPTXISD::TexUnified2DArrayS32S32";
+ case NVPTXISD::TexUnified2DArrayS32Float:
+ return "NVPTXISD::TexUnified2DArrayS32Float";
+ case NVPTXISD::TexUnified2DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayU32S32:
+ return "NVPTXISD::TexUnified2DArrayU32S32";
+ case NVPTXISD::TexUnified2DArrayU32Float:
+ return "NVPTXISD::TexUnified2DArrayU32Float";
+ case NVPTXISD::TexUnified2DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified3DFloatS32:
+ return "NVPTXISD::TexUnified3DFloatS32";
+ case NVPTXISD::TexUnified3DFloatFloat:
+ return "NVPTXISD::TexUnified3DFloatFloat";
+ case NVPTXISD::TexUnified3DFloatFloatLevel:
+ return "NVPTXISD::TexUnified3DFloatFloatLevel";
+ case NVPTXISD::TexUnified3DFloatFloatGrad:
+ return "NVPTXISD::TexUnified3DFloatFloatGrad";
+ case NVPTXISD::TexUnified3DS32S32:
+ return "NVPTXISD::TexUnified3DS32S32";
+ case NVPTXISD::TexUnified3DS32Float:
+ return "NVPTXISD::TexUnified3DS32Float";
+ case NVPTXISD::TexUnified3DS32FloatLevel:
+ return "NVPTXISD::TexUnified3DS32FloatLevel";
+ case NVPTXISD::TexUnified3DS32FloatGrad:
+ return "NVPTXISD::TexUnified3DS32FloatGrad";
+ case NVPTXISD::TexUnified3DU32S32:
+ return "NVPTXISD::TexUnified3DU32S32";
+ case NVPTXISD::TexUnified3DU32Float:
+ return "NVPTXISD::TexUnified3DU32Float";
+ case NVPTXISD::TexUnified3DU32FloatLevel:
+ return "NVPTXISD::TexUnified3DU32FloatLevel";
+ case NVPTXISD::TexUnified3DU32FloatGrad:
+ return "NVPTXISD::TexUnified3DU32FloatGrad";
+ case NVPTXISD::TexUnifiedCubeFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeFloatFloat";
+ case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeS32Float:
+ return "NVPTXISD::TexUnifiedCubeS32Float";
+ case NVPTXISD::TexUnifiedCubeS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeU32Float:
+ return "NVPTXISD::TexUnifiedCubeU32Float";
+ case NVPTXISD::TexUnifiedCubeU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayS32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayS32Float";
+ case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayU32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayU32Float";
+ case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4UnifiedR2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedG2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedB2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedA2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedR2DS64Float:
+ return "NVPTXISD::Tld4UnifiedR2DS64Float";
+ case NVPTXISD::Tld4UnifiedG2DS64Float:
+ return "NVPTXISD::Tld4UnifiedG2DS64Float";
+ case NVPTXISD::Tld4UnifiedB2DS64Float:
+ return "NVPTXISD::Tld4UnifiedB2DS64Float";
+ case NVPTXISD::Tld4UnifiedA2DS64Float:
+ return "NVPTXISD::Tld4UnifiedA2DS64Float";
+ case NVPTXISD::Tld4UnifiedR2DU64Float:
+ return "NVPTXISD::Tld4UnifiedR2DU64Float";
+ case NVPTXISD::Tld4UnifiedG2DU64Float:
+ return "NVPTXISD::Tld4UnifiedG2DU64Float";
+ case NVPTXISD::Tld4UnifiedB2DU64Float:
+ return "NVPTXISD::Tld4UnifiedB2DU64Float";
+ case NVPTXISD::Tld4UnifiedA2DU64Float:
+ return "NVPTXISD::Tld4UnifiedA2DU64Float";
+
+ case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
+ case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
+ case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
+ case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
+ case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
+ case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
+ case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
+ case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
+ case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
+ case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
+ case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
+
+ case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
+ case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
+ case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
+ case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
+ case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
+ case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
+ case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
+ case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
+ case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
+ case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
+ case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
+ case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
+ case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
+ case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
+ case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
+ case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
+ case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
+ case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
+ case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
+ case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
+ case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
+
+ case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
+ case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
+ case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
+ case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
+ case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
+ case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
+ case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
+ case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
+ case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
+ case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
+ case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
+ case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
+ case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
+ case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
+ case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
+ case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
+ case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
+ case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
+ case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
+ case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
+ case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
+
+ case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
+ case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
+ case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
+ case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
+ case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
+ case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
+ case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
+ case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
+ case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
+ case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
+ case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
+
+ case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
+ case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
+ case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
+ case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
+ case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
+ case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
+ case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
+ case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
+ case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
+ case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
+ case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
+
+ case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
+ case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
+ case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
+ case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
+ case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
+ case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
+ case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
+ case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
+ case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
+ case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
+ case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
+
+ case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
+ case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
+ case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
+ case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
+ case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
+ case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
+ case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
+ case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
+ case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
+ case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
+ case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
+
+ case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
+ case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
+ case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
+ case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
+ case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
+ case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
+ case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
+ case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
+ case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
+ case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
+ case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
+
+ case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
+ case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
+ case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
+ case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
+ case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
+ case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
+ case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
+ case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
+ case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
+ case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
+ case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
+
+ case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
+ case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
+ case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
+ case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
+ case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
+ case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
+ case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
+ case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
+ case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
+ case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
+ case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
+
+ case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
+ case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
+ case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
+ case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
+ case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
+ case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
+ case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
+ case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
+ case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
+ case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
+ case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
+
+ case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
+ case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
+ case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
+ case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
+ case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
+ case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
+ case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
+ case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
+ case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
+ case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
+ case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
+
+ case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
+ case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
+ case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
+ case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
+ case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
+ case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
+ case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
+ case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
+ case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
+ case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
+ case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
+ }
+ return nullptr;
+}
+
+// Decide how vectors of the given type are legalized: multi-element i1
+// vectors are split down to scalars, v2f16 stays legal as a packed type
+// (it fits in a single .b32 move — see LowerBUILD_VECTOR below), and
+// everything else follows the generic TargetLoweringBase policy.
+TargetLoweringBase::LegalizeTypeAction
+NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
+  // v2f16 is kept as-is; it has dedicated handling in this backend.
+  if (VT == MVT::v2f16)
+    return TypeLegal;
+  // i1 vectors with more than one element have no direct representation;
+  // request splitting so they eventually become scalars.
+  const bool IsMultiElementI1 =
+      VT.getScalarType() == MVT::i1 && VT.getVectorNumElements() != 1;
+  return IsMultiElementI1 ? TypeSplitVector
+                          : TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
+// Produce an approximate (reciprocal) square root of Operand via NVVM
+// intrinsics, or return SDValue() to decline and let generic lowering run.
+// An estimate is emitted when estimates are explicitly Enabled, or when the
+// choice is Unspecified and precise sqrt.f32 is not required.
+// NOTE(review): UseOneConst is never written here — this implementation
+// relies on the caller's default for that flag.
+SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
+                                             int Enabled, int &ExtraSteps,
+                                             bool &UseOneConst,
+                                             bool Reciprocal) const {
+  if (!(Enabled == ReciprocalEstimate::Enabled ||
+        (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
+    return SDValue();
+
+  // If the caller did not request refinement steps, declare the raw
+  // approximation good enough (zero Newton-Raphson iterations).
+  if (ExtraSteps == ReciprocalEstimate::Unspecified)
+    ExtraSteps = 0;
+
+  SDLoc DL(Operand);
+  EVT VT = Operand.getValueType();
+  // Flush-to-zero mode selects the .ftz variants of the f32 intrinsics.
+  bool Ftz = useF32FTZ(DAG.getMachineFunction());
+
+  // Helper that wraps Operand in a chainless NVVM intrinsic call.
+  auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
+    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+                       DAG.getConstant(IID, DL, MVT::i32), Operand);
+  };
+
+  // The sqrt and rsqrt refinement processes assume we always start out with an
+  // approximation of the rsqrt. Therefore, if we're going to do any refinement
+  // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
+  // any refinement, we must return a regular sqrt.
+  if (Reciprocal || ExtraSteps > 0) {
+    if (VT == MVT::f32)
+      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
+                                   : Intrinsic::nvvm_rsqrt_approx_f);
+    else if (VT == MVT::f64)
+      return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
+    else
+      return SDValue(); // No estimate available for other types.
+  } else {
+    if (VT == MVT::f32)
+      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
+                                   : Intrinsic::nvvm_sqrt_approx_f);
+    else {
+      // There's no sqrt.approx.f64 instruction, so we emit
+      // reciprocal(rsqrt(x)). This is faster than
+      // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
+      // x * rsqrt(x).)
+      return DAG.getNode(
+          ISD::INTRINSIC_WO_CHAIN, DL, VT,
+          DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
+          MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
+    }
+  }
+}
+
+// Lower a GlobalAddress node to a target global address wrapped in
+// NVPTXISD::Wrapper, the form the instruction selector pattern-matches.
+SDValue
+NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc Loc(Op);
+  EVT PtrVT = getPointerTy(DAG.getDataLayout());
+  const GlobalValue *Global = cast<GlobalAddressSDNode>(Op)->getGlobal();
+  SDValue TGA = DAG.getTargetGlobalAddress(Global, Loc, PtrVT);
+  return DAG.getNode(NVPTXISD::Wrapper, Loc, PtrVT, TGA);
+}
+
+// Build the PTX ".callprototype" string emitted for indirect calls. It
+// declares the return value and each parameter exactly as the callee's own
+// declaration would, so ptxas can type-check the indirect call site.
+//
+// \p DL           DataLayout used for all size/alignment queries.
+// \p retTy        IR return type of the callee.
+// \p Args / Outs  the argument list and its lowered output flags (kept in
+//                 sync via OIdx, since one IR arg may expand to several Outs).
+// \p retAlignment alignment used for aggregate/vector return values.
+// \p CS           the call site, queried for per-argument align metadata.
+std::string NVPTXTargetLowering::getPrototype(
+    const DataLayout &DL, Type *retTy, const ArgListTy &Args,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
+    const ImmutableCallSite *CS) const {
+  auto PtrVT = getPointerTy(DL);
+
+  bool isABI = (STI.getSmVersion() >= 20);
+  assert(isABI && "Non-ABI compilation is not supported");
+  if (!isABI)
+    return "";
+
+  std::stringstream O;
+  O << "prototype_" << uniqueCallSite << " : .callprototype ";
+
+  if (retTy->getTypeID() == Type::VoidTyID) {
+    O << "()";
+  } else {
+    O << "(";
+    if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
+      unsigned size = 0;
+      if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
+        size = ITy->getBitWidth();
+      } else {
+        assert(retTy->isFloatingPointTy() &&
+               "Floating point type expected here");
+        size = retTy->getPrimitiveSizeInBits();
+      }
+      // PTX ABI requires all scalar return values to be at least 32
+      // bits in size. fp16 normally uses .b16 as its storage type in
+      // PTX, so its size must be adjusted here, too.
+      if (size < 32)
+        size = 32;
+
+      O << ".param .b" << size << " _";
+    } else if (isa<PointerType>(retTy)) {
+      O << ".param .b" << PtrVT.getSizeInBits() << " _";
+    } else if (retTy->isAggregateType() || retTy->isVectorTy()) {
+      // Use the DataLayout passed in by the caller. Re-fetching it through
+      // CS->getCalledFunction() would dereference a null pointer here:
+      // getPrototype is only invoked for indirect calls, which have no
+      // called Function.
+      O << ".param .align " << retAlignment << " .b8 _["
+        << DL.getTypeAllocSize(retTy) << "]";
+    } else {
+      llvm_unreachable("Unknown return type");
+    }
+    O << ") ";
+  }
+  O << "_ (";
+
+  bool first = true;
+
+  // OIdx tracks the position in Outs corresponding to IR argument i; one IR
+  // aggregate/vector argument may occupy several consecutive Outs slots.
+  unsigned OIdx = 0;
+  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+    Type *Ty = Args[i].Ty;
+    if (!first) {
+      O << ", ";
+    }
+    first = false;
+
+    if (!Outs[OIdx].Flags.isByVal()) {
+      if (Ty->isAggregateType() || Ty->isVectorTy()) {
+        unsigned align = 0;
+        const CallInst *CallI = cast<CallInst>(CS->getInstruction());
+        // +1 because index 0 is reserved for return type alignment
+        if (!getAlign(*CallI, i + 1, align))
+          align = DL.getABITypeAlignment(Ty);
+        unsigned sz = DL.getTypeAllocSize(Ty);
+        O << ".param .align " << align << " .b8 ";
+        O << "_";
+        O << "[" << sz << "]";
+        // update the index for Outs
+        SmallVector<EVT, 16> vtparts;
+        ComputeValueVTs(*this, DL, Ty, vtparts);
+        if (unsigned len = vtparts.size())
+          OIdx += len - 1;
+        continue;
+      }
+      // i8 types in IR will be i16 types in SDAG
+      assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
+              (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
+             "type mismatch between callee prototype and arguments");
+      // scalar type
+      unsigned sz = 0;
+      if (isa<IntegerType>(Ty)) {
+        sz = cast<IntegerType>(Ty)->getBitWidth();
+        if (sz < 32)
+          sz = 32;
+      } else if (isa<PointerType>(Ty)) {
+        sz = PtrVT.getSizeInBits();
+      } else if (Ty->isHalfTy())
+        // PTX ABI requires all scalar parameters to be at least 32
+        // bits in size. fp16 normally uses .b16 as its storage type
+        // in PTX, so its size must be adjusted here, too.
+        sz = 32;
+      else
+        sz = Ty->getPrimitiveSizeInBits();
+      O << ".param .b" << sz << " ";
+      O << "_";
+      continue;
+    }
+    // Byval parameter: declared as a byte array with the pointee's size and
+    // the alignment recorded in the lowered output flags.
+    auto *PTy = dyn_cast<PointerType>(Ty);
+    assert(PTy && "Param with byval attribute should be a pointer type");
+    Type *ETy = PTy->getElementType();
+
+    unsigned align = Outs[OIdx].Flags.getByValAlign();
+    unsigned sz = DL.getTypeAllocSize(ETy);
+    O << ".param .align " << align << " .b8 ";
+    O << "_";
+    O << "[" << sz << "]";
+  }
+  O << ");";
+  return O.str();
+}
+
+// Determine the alignment for argument/return slot \p Idx of a call site.
+// Preference order: align metadata on the call instruction itself, align
+// metadata on the resolved callee Function (looking through constant casts),
+// and finally the ABI alignment of \p Ty.
+unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
+                                                   const ImmutableCallSite *CS,
+                                                   Type *Ty, unsigned Idx,
+                                                   const DataLayout &DL) const {
+  // Without a call site, the ABI type alignment is all we have.
+  if (!CS)
+    return DL.getABITypeAlignment(Ty);
+
+  unsigned Align = 0;
+  const Value *DirectCallee = CS->getCalledFunction();
+
+  if (!DirectCallee) {
+    // No direct function symbol — the target may be hidden behind constant
+    // cast expressions on the call.
+    const Instruction *CalleeI = CS->getInstruction();
+    assert(CalleeI && "Call target is not a function or derived value?");
+
+    if (const auto *Call = dyn_cast<CallInst>(CalleeI)) {
+      // Alignment metadata attached to the call wins outright.
+      if (getAlign(*Call, Idx, Align))
+        return Align;
+
+      // Peel off any constant cast expressions wrapping the callee.
+      const Value *CalleeV = Call->getCalledValue();
+      while (const auto *CE = dyn_cast<ConstantExpr>(CalleeV)) {
+        if (!CE->isCast())
+          break;
+        CalleeV = CE->getOperand(0);
+      }
+
+      // If the casts were hiding a real Function, use it below.
+      if (isa<Function>(CalleeV))
+        DirectCallee = CalleeV;
+    }
+  }
+
+  // Check for alignment metadata on the resolved Function, if any.
+  if (DirectCallee)
+    if (getAlign(*cast<Function>(DirectCallee), Idx, Align))
+      return Align;
+
+  // Call is indirect or alignment information is not available; fall back
+  // to the ABI type alignment.
+  return DL.getABITypeAlignment(Ty);
+}
+
+// Lower an outgoing call into the NVPTX pseudo-op sequence the asm printer
+// turns into PTX call syntax: declare a .param slot for each argument, store
+// the argument values into those slots, print the call itself, and load any
+// results back out of the return .param. Every pseudo op is glued to its
+// predecessor through MVT::Glue (InFlag) so the sequence stays contiguous.
+// Tail calls are not supported (isTailCall is forced to false at the end).
+SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+                                       SmallVectorImpl<SDValue> &InVals) const {
+  SelectionDAG &DAG = CLI.DAG;
+  SDLoc dl = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+  SDValue Chain = CLI.Chain;
+  SDValue Callee = CLI.Callee;
+  bool &isTailCall = CLI.IsTailCall;
+  ArgListTy &Args = CLI.getArgs();
+  Type *RetTy = CLI.RetTy;
+  ImmutableCallSite *CS = CLI.CS;
+  const DataLayout &DL = DAG.getDataLayout();
+
+  bool isABI = (STI.getSmVersion() >= 20);
+  assert(isABI && "Non-ABI compilation is not supported");
+  if (!isABI)
+    return Chain;
+
+  // Keep the pre-CALLSEQ_START chain: loads from byval argument memory below
+  // are chained to it rather than to the call sequence.
+  SDValue tempChain = Chain;
+  Chain = DAG.getCALLSEQ_START(
+      Chain, DAG.getIntPtrConstant(uniqueCallSite, dl, true), dl);
+  SDValue InFlag = Chain.getValue(1);
+
+  unsigned paramCount = 0;
+  // Args.size() and Outs.size() need not match.
+  // Outs.size() will be larger
+  //   * if there is an aggregate argument with multiple fields (each field
+  //     showing up separately in Outs)
+  //   * if there is a vector argument with more than typical vector-length
+  //     elements (generally if more than 4) where each vector element is
+  //     individually present in Outs.
+  // So a different index should be used for indexing into Outs/OutVals.
+  // See similar issue in LowerFormalArguments.
+  unsigned OIdx = 0;
+  // Declare the .params or .reg need to pass values
+  // to the function
+  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+    EVT VT = Outs[OIdx].VT;
+    Type *Ty = Args[i].Ty;
+
+    if (!Outs[OIdx].Flags.isByVal()) {
+      // Non-byval argument: split into PTX value parts and their byte
+      // offsets, then store each part into the declared .param slot.
+      SmallVector<EVT, 16> VTs;
+      SmallVector<uint64_t, 16> Offsets;
+      ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
+      // paramCount + 1 because align index 0 is the return value's slot.
+      unsigned ArgAlign =
+          getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
+      unsigned AllocSize = DL.getTypeAllocSize(Ty);
+      SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+      bool NeedAlign; // Does argument declaration specify alignment?
+      if (Ty->isAggregateType() || Ty->isVectorTy()) {
+        // declare .param .align <align> .b8 .param<n>[<size>];
+        SDValue DeclareParamOps[] = {
+            Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+            DAG.getConstant(paramCount, dl, MVT::i32),
+            DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
+        Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+                            DeclareParamOps);
+        NeedAlign = true;
+      } else {
+        // declare .param .b<size> .param<n>;
+        if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
+          // PTX ABI requires integral types to be at least 32 bits in
+          // size. FP16 is loaded/stored using i16, so it's handled
+          // here as well.
+          AllocSize = 4;
+        }
+        SDValue DeclareScalarParamOps[] = {
+            Chain, DAG.getConstant(paramCount, dl, MVT::i32),
+            DAG.getConstant(AllocSize * 8, dl, MVT::i32),
+            DAG.getConstant(0, dl, MVT::i32), InFlag};
+        Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
+                            DeclareScalarParamOps);
+        NeedAlign = false;
+      }
+      InFlag = Chain.getValue(1);
+
+      // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
+      // than 32-bits are sign extended or zero extended, depending on
+      // whether they are signed or unsigned types. This case applies
+      // only to scalar parameters and not to aggregate values.
+      bool ExtendIntegerParam =
+          Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
+
+      // Group adjacent parts into v2/v4 stores where the layout allows it.
+      auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
+      SmallVector<SDValue, 6> StoreOperands;
+      for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+        // New store.
+        if (VectorInfo[j] & PVF_FIRST) {
+          assert(StoreOperands.empty() && "Unfinished preceeding store.");
+          StoreOperands.push_back(Chain);
+          StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
+          StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
+        }
+
+        EVT EltVT = VTs[j];
+        SDValue StVal = OutVals[OIdx];
+        if (ExtendIntegerParam) {
+          assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
+          // zext/sext to i32
+          StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+                                                        : ISD::ZERO_EXTEND,
+                              dl, MVT::i32, StVal);
+        } else if (EltVT.getSizeInBits() < 16) {
+          // Use 16-bit registers for small stores as it's the
+          // smallest general purpose register size supported by NVPTX.
+          StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
+        }
+
+        // Record the value to store.
+        StoreOperands.push_back(StVal);
+
+        if (VectorInfo[j] & PVF_LAST) {
+          // First three StoreOperands are Chain/param-index/offset; the rest
+          // are the values being stored.
+          unsigned NumElts = StoreOperands.size() - 3;
+          NVPTXISD::NodeType Op;
+          switch (NumElts) {
+          case 1:
+            Op = NVPTXISD::StoreParam;
+            break;
+          case 2:
+            Op = NVPTXISD::StoreParamV2;
+            break;
+          case 4:
+            Op = NVPTXISD::StoreParamV4;
+            break;
+          default:
+            llvm_unreachable("Invalid vector info.");
+          }
+
+          StoreOperands.push_back(InFlag);
+
+          // Adjust type of the store op if we've extended the scalar
+          // return value.
+          EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
+          unsigned EltAlign =
+              NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
+
+          Chain = DAG.getMemIntrinsicNode(
+              Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
+              TheStoreType, MachinePointerInfo(), EltAlign);
+          InFlag = Chain.getValue(1);
+
+          // Cleanup.
+          StoreOperands.clear();
+        }
+        ++OIdx;
+      }
+      assert(StoreOperands.empty() && "Unfinished parameter store.");
+      // The loop above advanced OIdx once per part; step back one because
+      // the outer for-loop's ++OIdx will account for the last part.
+      if (VTs.size() > 0)
+        --OIdx;
+      ++paramCount;
+      continue;
+    }
+
+    // ByVal arguments
+    SmallVector<EVT, 16> VTs;
+    SmallVector<uint64_t, 16> Offsets;
+    auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
+    assert(PTy && "Type of a byval parameter should be pointer");
+    ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
+
+    // declare .param .align <align> .b8 .param<n>[<size>];
+    unsigned sz = Outs[OIdx].Flags.getByValSize();
+    SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+    unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
+    // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
+    // so we don't need to worry about natural alignment or not.
+    // See TargetLowering::LowerCallTo().
+
+    // Enforce minimum alignment of 4 to work around ptxas miscompile
+    // for sm_50+. See corresponding alignment adjustment in
+    // emitFunctionParamList() for details.
+    if (ArgAlign < 4)
+      ArgAlign = 4;
+    SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+                                 DAG.getConstant(paramCount, dl, MVT::i32),
+                                 DAG.getConstant(sz, dl, MVT::i32), InFlag};
+    Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+                        DeclareParamOps);
+    InFlag = Chain.getValue(1);
+    // Load each part out of the byval memory (chained to tempChain, the
+    // pre-call chain) and store it into the declared .param slot.
+    for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+      EVT elemtype = VTs[j];
+      int curOffset = Offsets[j];
+      unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
+      auto PtrVT = getPointerTy(DL);
+      SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
+                                    DAG.getConstant(curOffset, dl, PtrVT));
+      SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+                                   MachinePointerInfo(), PartAlign);
+      if (elemtype.getSizeInBits() < 16) {
+        theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
+      }
+      SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+      SDValue CopyParamOps[] = { Chain,
+                                 DAG.getConstant(paramCount, dl, MVT::i32),
+                                 DAG.getConstant(curOffset, dl, MVT::i32),
+                                 theVal, InFlag };
+      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
+                                      CopyParamOps, elemtype,
+                                      MachinePointerInfo());
+
+      InFlag = Chain.getValue(1);
+    }
+    ++paramCount;
+  }
+
+  // Func is non-null for direct calls (callee is a global address).
+  GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
+  unsigned retAlignment = 0;
+
+  // Handle Result
+  if (Ins.size() > 0) {
+    SmallVector<EVT, 16> resvtparts;
+    ComputeValueVTs(*this, DL, RetTy, resvtparts);
+
+    // Declare
+    //  .param .align 16 .b8 retval0[<size-in-bytes>], or
+    //  .param .b<size-in-bits> retval0
+    unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
+    // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
+    // these three types to match the logic in
+    // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
+    // Plus, this behavior is consistent with nvcc's.
+    if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||
+        RetTy->isPointerTy()) {
+      // Scalar needs to be at least 32bit wide
+      if (resultsz < 32)
+        resultsz = 32;
+      SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+      SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+                                  DAG.getConstant(resultsz, dl, MVT::i32),
+                                  DAG.getConstant(0, dl, MVT::i32), InFlag };
+      Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
+                          DeclareRetOps);
+      InFlag = Chain.getValue(1);
+    } else {
+      retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+      SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+      SDValue DeclareRetOps[] = { Chain,
+                                  DAG.getConstant(retAlignment, dl, MVT::i32),
+                                  DAG.getConstant(resultsz / 8, dl, MVT::i32),
+                                  DAG.getConstant(0, dl, MVT::i32), InFlag };
+      Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
+                          DeclareRetOps);
+      InFlag = Chain.getValue(1);
+    }
+  }
+
+  if (!Func) {
+    // This is indirect function call case : PTX requires a prototype of the
+    // form
+    // proto_0 : .callprototype(.param .b32 _)  _ (.param .b32 _);
+    // to be emitted, and the label has to used as the last arg of call
+    // instruction.
+    // The prototype is embedded in a string and put as the operand for a
+    // CallPrototype SDNode which will print out to the value of the string.
+    SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+    std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
+    const char *ProtoStr =
+      nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
+    SDValue ProtoOps[] = {
+      Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
+    };
+    Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
+    InFlag = Chain.getValue(1);
+  }
+  // Op to just print "call"
+  SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+  SDValue PrintCallOps[] = {
+    Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
+  };
+  // We model convergent calls as separate opcodes.
+  unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
+  if (CLI.IsConvergent)
+    Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
+                                              : NVPTXISD::PrintConvergentCall;
+  Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
+  InFlag = Chain.getValue(1);
+
+  // Ops to print out the function name
+  SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+  SDValue CallVoidOps[] = { Chain, Callee, InFlag };
+  Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
+  InFlag = Chain.getValue(1);
+
+  // Ops to print out the param list
+  SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+  SDValue CallArgBeginOps[] = { Chain, InFlag };
+  Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
+                      CallArgBeginOps);
+  InFlag = Chain.getValue(1);
+
+  // One CallArg pseudo op per declared parameter; the last one uses a
+  // distinct opcode so the printer can close the argument list.
+  for (unsigned i = 0, e = paramCount; i != e; ++i) {
+    unsigned opcode;
+    if (i == (e - 1))
+      opcode = NVPTXISD::LastCallArg;
+    else
+      opcode = NVPTXISD::CallArg;
+    SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+    SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+                             DAG.getConstant(i, dl, MVT::i32), InFlag };
+    Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
+    InFlag = Chain.getValue(1);
+  }
+  SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+  SDValue CallArgEndOps[] = { Chain,
+                              DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
+                              InFlag };
+  Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
+  InFlag = Chain.getValue(1);
+
+  if (!Func) {
+    // Indirect call: reference the prototype label emitted above.
+    SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+    SDValue PrototypeOps[] = { Chain,
+                               DAG.getConstant(uniqueCallSite, dl, MVT::i32),
+                               InFlag };
+    Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
+    InFlag = Chain.getValue(1);
+  }
+
+  // Generate loads from param memory/moves from registers for result
+  if (Ins.size() > 0) {
+    SmallVector<EVT, 16> VTs;
+    SmallVector<uint64_t, 16> Offsets;
+    ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
+    assert(VTs.size() == Ins.size() && "Bad value decomposition");
+
+    unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+    auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
+
+    SmallVector<EVT, 6> LoadVTs;
+    int VecIdx = -1; // Index of the first element of the vector.
+
+    // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+    // 32-bits are sign extended or zero extended, depending on whether
+    // they are signed or unsigned types.
+    bool ExtendIntegerRetVal =
+        RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+    for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+      bool needTruncate = false;
+      EVT TheLoadType = VTs[i];
+      EVT EltType = Ins[i].VT;
+      unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
+      if (ExtendIntegerRetVal) {
+        TheLoadType = MVT::i32;
+        EltType = MVT::i32;
+        needTruncate = true;
+      } else if (TheLoadType.getSizeInBits() < 16) {
+        if (VTs[i].isInteger())
+          needTruncate = true;
+        EltType = MVT::i16;
+      }
+
+      // Record index of the very first element of the vector.
+      if (VectorInfo[i] & PVF_FIRST) {
+        assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
+        VecIdx = i;
+      }
+
+      LoadVTs.push_back(EltType);
+
+      if (VectorInfo[i] & PVF_LAST) {
+        unsigned NumElts = LoadVTs.size();
+        // The load node also produces a chain and a glue value.
+        LoadVTs.push_back(MVT::Other);
+        LoadVTs.push_back(MVT::Glue);
+        NVPTXISD::NodeType Op;
+        switch (NumElts) {
+        case 1:
+          Op = NVPTXISD::LoadParam;
+          break;
+        case 2:
+          Op = NVPTXISD::LoadParamV2;
+          break;
+        case 4:
+          Op = NVPTXISD::LoadParamV4;
+          break;
+        default:
+          llvm_unreachable("Invalid vector info.");
+        }
+
+        SDValue LoadOperands[] = {
+            Chain, DAG.getConstant(1, dl, MVT::i32),
+            DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
+        SDValue RetVal = DAG.getMemIntrinsicNode(
+            Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
+            MachinePointerInfo(), EltAlign);
+
+        for (unsigned j = 0; j < NumElts; ++j) {
+          SDValue Ret = RetVal.getValue(j);
+          if (needTruncate)
+            Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
+          InVals.push_back(Ret);
+        }
+        Chain = RetVal.getValue(NumElts);
+        InFlag = RetVal.getValue(NumElts + 1);
+
+        // Cleanup
+        VecIdx = -1;
+        LoadVTs.clear();
+      }
+    }
+  }
+
+  Chain = DAG.getCALLSEQ_END(Chain,
+                             DAG.getIntPtrConstant(uniqueCallSite, dl, true),
+                             DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
+                                                   true),
+                             InFlag, dl);
+  uniqueCallSite++;
+
+  // set isTailCall to false for now, until we figure out how to express
+  // tail call optimization in PTX
+  isTailCall = false;
+  return Chain;
+}
+
+// By default CONCAT_VECTORS is expanded through the stack
+// (ExpandVectorBuildThroughStack() in LegalizeDAG.cpp), which is slow and
+// uses local memory. Instead, pull every element out of each source vector
+// with EXTRACT_VECTOR_ELT and rebuild the result as one BUILD_VECTOR, as
+// LegalizeOp() did in llvm 2.5.
+SDValue
+NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc Loc(Op.getNode());
+  SmallVector<SDValue, 8> Elements;
+  // Walk every operand vector and scalarize it, preserving element order.
+  for (const SDValue &Src : Op.getNode()->op_values()) {
+    EVT SrcVT = Src.getValueType();
+    EVT EltVT = SrcVT.getVectorElementType();
+    for (unsigned Idx = 0, E = SrcVT.getVectorNumElements(); Idx != E; ++Idx)
+      Elements.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Loc, EltVT, Src,
+                                     DAG.getIntPtrConstant(Idx, Loc)));
+  }
+  return DAG.getBuildVector(Op.getNode()->getValueType(0), Loc, Elements);
+}
+
+// A constant v2f16 can be materialized with one 32-bit immediate move
+// instead of two 16-bit constant moves plus a vector-packing move:
+//   mov.b32         %hh2, 0x40003C00;
+// rather than
+//   mov.b16         %h1, 0x4000;
+//   mov.b16         %h2, 0x3C00;
+//   mov.b32         %hh2, {%h2, %h1};
+// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
+// generates good SASS in both cases.
+SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  // Only a v2f16 built from two floating-point constants is handled here.
+  if (Op->getValueType(0) != MVT::v2f16)
+    return Op;
+  auto *C0 = dyn_cast<ConstantFPSDNode>(Op->getOperand(0));
+  auto *C1 = dyn_cast<ConstantFPSDNode>(Op->getOperand(1));
+  if (!C0 || !C1)
+    return Op;
+
+  // Pack the two fp16 bit patterns into one i32: element 1 occupies the
+  // high half, element 0 the low half.
+  APInt Lo = C0->getValueAPF().bitcastToAPInt().zext(32);
+  APInt Hi = C1->getValueAPF().bitcastToAPInt().zext(32);
+  SDValue Packed = DAG.getConstant(Hi.shl(16) | Lo, SDLoc(Op), MVT::i32);
+  return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Packed);
+}
+
+// Lower EXTRACT_VECTOR_ELT of v2f16 with a dynamic index: extract both
+// halves and select between them based on the index. Constant indices are
+// left alone — tablegen patterns match them directly.
+SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  SDValue Index = Op->getOperand(1);
+  if (isa<ConstantSDNode>(Index.getNode()))
+    return Op; // Constant index is matched by tablegen.
+
+  SDValue Vec = Op->getOperand(0);
+  assert(Vec.getValueType() == MVT::v2f16 && "Unexpected vector type.");
+  EVT EltVT = Vec.getValueType().getVectorElementType();
+
+  SDLoc Loc(Op.getNode());
+  SDValue Zero = DAG.getIntPtrConstant(0, Loc);
+  SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Loc, EltVT, Vec, Zero);
+  SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Loc, EltVT, Vec,
+                             DAG.getIntPtrConstant(1, Loc));
+  // result = (Index == 0) ? Elt0 : Elt1
+  return DAG.getSelectCC(Loc, Index, Zero, Elt0, Elt1, ISD::CondCode::SETEQ);
+}
+
+/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
+/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
+///    amount, or
+/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
+///    amount.
+SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
+
+  EVT VT = Op.getValueType();
+  unsigned VTBits = VT.getSizeInBits();
+  SDLoc dl(Op);
+  SDValue ShOpLo = Op.getOperand(0);
+  SDValue ShOpHi = Op.getOperand(1);
+  SDValue ShAmt = Op.getOperand(2);
+  // SRA_PARTS shifts sign bits into the high word; SRL_PARTS shifts in
+  // zeros. Only the high-word shift opcode differs between the two.
+  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
+
+  if (VTBits == 32 && STI.getSmVersion() >= 35) {
+    // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
+    // {dHi, dLo} = {aHi, aLo} >> Amt
+    //   dHi = aHi >> Amt
+    //   dLo = shf.r.clamp aLo, aHi, Amt
+
+    SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+    SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
+                             ShAmt);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+  else {
+    // Generic expansion without the funnel-shift instruction:
+    // {dHi, dLo} = {aHi, aLo} >> Amt
+    // - if (Amt>=size) then
+    //      dLo = aHi >> (Amt-size)
+    //      dHi = aHi >> Amt (this is either all 0 or all 1)
+    //   else
+    //      dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
+    //      dHi = aHi >> Amt
+
+    SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+                                   DAG.getConstant(VTBits, dl, MVT::i32),
+                                   ShAmt);
+    SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
+    SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+                                     DAG.getConstant(VTBits, dl, MVT::i32));
+    SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
+    SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+    SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
+
+    // Select between the two expansions based on Amt >= size.
+    SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+                               DAG.getConstant(VTBits, dl, MVT::i32),
+                               ISD::SETGE);
+    SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+    SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+}
+
+/// LowerShiftLeftParts - Lower SHL_PARTS, which
+/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
+///    amount, or
+/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
+///    amount.
+SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+  assert(Op.getOpcode() == ISD::SHL_PARTS);
+
+  EVT VT = Op.getValueType();
+  unsigned VTBits = VT.getSizeInBits();
+  SDLoc dl(Op);
+  SDValue ShOpLo = Op.getOperand(0);
+  SDValue ShOpHi = Op.getOperand(1);
+  SDValue ShAmt = Op.getOperand(2);
+
+  if (VTBits == 32 && STI.getSmVersion() >= 35) {
+    // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
+    // {dHi, dLo} = {aHi, aLo} << Amt
+    //   dHi = shf.l.clamp aLo, aHi, Amt
+    //   dLo = aLo << Amt
+
+    SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
+                             ShAmt);
+    SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+  else {
+    // Generic expansion without the funnel-shift instruction:
+    // {dHi, dLo} = {aHi, aLo} << Amt
+    // - if (Amt>=size) then
+    //      dLo = aLo << Amt (all 0)
+    //      dHi = aLo << (Amt-size)
+    //   else
+    //      dLo = aLo << Amt
+    //      dHi = (aHi << Amt) | (aLo >> (size-Amt))
+
+    SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+                                   DAG.getConstant(VTBits, dl, MVT::i32),
+                                   ShAmt);
+    SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
+    SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+                                     DAG.getConstant(VTBits, dl, MVT::i32));
+    SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
+    SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+    SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
+
+    // Select between the two expansions based on Amt >= size.
+    SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+                               DAG.getConstant(VTBits, dl, MVT::i32),
+                               ISD::SETGE);
+    SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+    SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+    SDValue Ops[2] = { Lo, Hi };
+    return DAG.getMergeValues(Ops, dl);
+  }
+}
+
+// Central dispatch for all operations marked Custom in the NVPTX lowering
+// tables. Opcodes that return Op unchanged (INTRINSIC_W_CHAIN,
+// EXTRACT_SUBVECTOR) are legal as-is; RETURNADDR/FRAMEADDR return an empty
+// SDValue (no lowering produced).
+SDValue
+NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ case ISD::RETURNADDR:
+ return SDValue();
+ case ISD::FRAMEADDR:
+ return SDValue();
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN:
+ return Op;
+ case ISD::BUILD_VECTOR:
+ return LowerBUILD_VECTOR(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR:
+ return Op;
+ case ISD::EXTRACT_VECTOR_ELT:
+ return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::CONCAT_VECTORS:
+ return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::STORE:
+ return LowerSTORE(Op, DAG);
+ case ISD::LOAD:
+ return LowerLOAD(Op, DAG);
+ case ISD::SHL_PARTS:
+ return LowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ case ISD::SRL_PARTS:
+ return LowerShiftRightParts(Op, DAG);
+ case ISD::SELECT:
+ return LowerSelect(Op, DAG);
+ default:
+ llvm_unreachable("Custom lowering not defined for operation");
+ }
+}
+
+// Lower an i1 SELECT by widening both selected values to i32, performing the
+// select at i32, and truncating the result back to i1.
+SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Op0 = Op->getOperand(0); // condition
+ SDValue Op1 = Op->getOperand(1); // true value
+ SDValue Op2 = Op->getOperand(2); // false value
+ SDLoc DL(Op.getNode());
+
+ assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
+
+ // ANY_EXTEND is sufficient: only the low bit survives the final truncate.
+ Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
+ Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
+ SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
+
+ return Trunc;
+}
+
+// Custom lowering for loads: i1 loads are widened (see LowerLOADi1), and
+// unaligned v2f16 loads are expanded here because v2f16 is a legal type, so
+// the generic legalizer would otherwise leave the unaligned access alone.
+// Returns an empty SDValue for everything else (use default handling).
+SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ if (Op.getValueType() == MVT::i1)
+ return LowerLOADi1(Op, DAG);
+
+ // v2f16 is legal, so we can't rely on legalizer to handle unaligned
+ // loads and have to handle it here.
+ if (Op.getValueType() == MVT::v2f16) {
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
+ EVT MemVT = Load->getMemoryVT();
+ if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
+ Load->getAddressSpace(), Load->getAlignment())) {
+ SDValue Ops[2];
+ // expandUnalignedLoad yields {value, chain}; merge them for the caller.
+ std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
+ return DAG.getMergeValues(Ops, SDLoc(Op));
+ }
+ }
+
+ return SDValue();
+}
+
+// v = ld i1* addr
+// =>
+// v1 = ld i8* addr (-> i16)
+// v = trunc i16 to i1
+SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ LoadSDNode *LD = cast<LoadSDNode>(Node);
+ SDLoc dl(Node);
+ assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
+ assert(Node->getValueType(0) == MVT::i1 &&
+ "Custom lowering for i1 load only");
+ // Load the byte as i16 (the smallest GPR NVPTX uses), then truncate to i1.
+ SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
+ LD->getPointerInfo(), LD->getAlignment(),
+ LD->getMemOperand()->getFlags());
+ SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
+ // The legalizer (the caller) is expecting two values from the legalized
+ // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
+ // in LegalizeDAG.cpp which also uses MergeValues.
+ SDValue Ops[] = { result, LD->getChain() };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+// Custom lowering for stores, mirroring LowerLOAD: i1 stores are widened,
+// unaligned v2f16 stores are expanded (v2f16 is legal so the legalizer will
+// not do it), and other vector stores go through LowerSTOREVector. An empty
+// SDValue means "use default handling".
+SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ EVT VT = Store->getMemoryVT();
+
+ if (VT == MVT::i1)
+ return LowerSTOREi1(Op, DAG);
+
+ // v2f16 is legal, so we can't rely on legalizer to handle unaligned
+ // stores and have to handle it here.
+ if (VT == MVT::v2f16 &&
+ !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+ Store->getAddressSpace(), Store->getAlignment()))
+ return expandUnalignedStore(Store, DAG);
+
+ if (VT.isVector())
+ return LowerSTOREVector(Op, DAG);
+
+ return SDValue();
+}
+
+// Lower a vector store into an NVPTX StoreV2/StoreV4 target node when the
+// vector type is one PTX can store natively. Returns an empty SDValue (i.e.
+// fall back to scalarization) for non-native types or insufficiently aligned
+// stores.
+SDValue
+NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *N = Op.getNode();
+ SDValue Val = N->getOperand(1);
+ SDLoc DL(N);
+ EVT ValVT = Val.getValueType();
+
+ if (ValVT.isVector()) {
+ // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+ // legal. We can (and should) split that into 2 stores of <2 x double> here
+ // but I'm leaving that as a TODO for now.
+ if (!ValVT.isSimple())
+ return SDValue();
+ switch (ValVT.getSimpleVT().SimpleTy) {
+ default:
+ return SDValue();
+ case MVT::v2i8:
+ case MVT::v2i16:
+ case MVT::v2i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v2f32:
+ case MVT::v2f64:
+ case MVT::v4i8:
+ case MVT::v4i16:
+ case MVT::v4i32:
+ case MVT::v4f16:
+ case MVT::v4f32:
+ case MVT::v8f16: // <4 x f16x2>
+ // This is a "native" vector type
+ break;
+ }
+
+ MemSDNode *MemSD = cast<MemSDNode>(N);
+ const DataLayout &TD = DAG.getDataLayout();
+
+ unsigned Align = MemSD->getAlignment();
+ unsigned PrefAlign =
+ TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This store is not sufficiently aligned, so bail out and let this vector
+ // store be scalarized. Note that we may still be able to emit smaller
+ // vector stores. For example, if we are storing a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return SDValue();
+ }
+
+ unsigned Opcode = 0;
+ EVT EltVT = ValVT.getVectorElementType();
+ unsigned NumElts = ValVT.getVectorNumElements();
+
+ // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // stored type to i16 and propagate the "real" type as the memory type.
+ bool NeedExt = false;
+ if (EltVT.getSizeInBits() < 16)
+ NeedExt = true;
+
+ bool StoreF16x2 = false;
+ switch (NumElts) {
+ default:
+ return SDValue();
+ case 2:
+ Opcode = NVPTXISD::StoreV2;
+ break;
+ case 4:
+ Opcode = NVPTXISD::StoreV4;
+ break;
+ case 8:
+ // v8f16 is a special case. PTX doesn't have st.v8.f16
+ // instruction. Instead, we split the vector into v2f16 chunks and
+ // store them with st.v4.b32.
+ assert(EltVT == MVT::f16 && "Wrong type for the vector.");
+ Opcode = NVPTXISD::StoreV4;
+ StoreF16x2 = true;
+ break;
+ }
+
+ SmallVector<SDValue, 8> Ops;
+
+ // First is the chain
+ Ops.push_back(N->getOperand(0));
+
+ if (StoreF16x2) {
+ // Combine f16,f16 -> v2f16
+ // After halving, NumElts counts the v2f16 chunks (4 for v8f16).
+ NumElts /= 2;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+ DAG.getIntPtrConstant(i * 2, DL));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+ DAG.getIntPtrConstant(i * 2 + 1, DL));
+ SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
+ Ops.push_back(V2);
+ }
+ } else {
+ // Then the split values
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
+ DAG.getIntPtrConstant(i, DL));
+ if (NeedExt)
+ ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
+ Ops.push_back(ExtVal);
+ }
+ }
+
+ // Then any remaining arguments
+ Ops.append(N->op_begin() + 2, N->op_end());
+
+ SDValue NewSt =
+ DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
+ MemSD->getMemoryVT(), MemSD->getMemOperand());
+
+ // return DCI.CombineTo(N, NewSt, true);
+ return NewSt;
+ }
+
+ return SDValue();
+}
+
+// st i1 v, addr
+// =>
+// v1 = zxt v to i16
+// st.u8 i16, addr
+SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ SDLoc dl(Node);
+ StoreSDNode *ST = cast<StoreSDNode>(Node);
+ SDValue Tmp1 = ST->getChain();
+ SDValue Tmp2 = ST->getBasePtr();
+ SDValue Tmp3 = ST->getValue();
+ assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
+ // Zero-extend (not any-extend): the stored byte must be exactly 0 or 1.
+ Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
+ // Truncating store narrows the i16 register value back down to one byte.
+ SDValue Result =
+ DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
+ ST->getAlignment(), ST->getMemOperand()->getFlags());
+ return Result;
+}
+
+// Build the external symbol "<function-name>_param_<idx>" used to address the
+// idx'th formal parameter in PTX. The string is interned in the target
+// machine's managed string pool so the returned c_str() stays valid for the
+// lifetime of the TargetMachine.
+SDValue
+NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
+ std::string ParamSym;
+ raw_string_ostream ParamStr(ParamSym);
+
+ ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
+ ParamStr.flush();
+
+ std::string *SavedStr =
+ nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
+ return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
+}
+
+// Check to see if the kernel argument is image*_t or sampler_t
+// Returns true iff 'arg' is a pointer to a named (non-literal) struct whose
+// name matches one of the OpenCL special types below. The 'context' Module
+// parameter is only used as a null guard here.
+static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
+ static const char *const specialTypes[] = { "struct._image2d_t",
+ "struct._image3d_t",
+ "struct._sampler_t" };
+
+ Type *Ty = arg->getType();
+ auto *PTy = dyn_cast<PointerType>(Ty);
+
+ if (!PTy)
+ return false;
+
+ if (!context)
+ return false;
+
+ // Literal structs have no name, so only named struct types can match.
+ auto *STy = dyn_cast<StructType>(PTy->getElementType());
+ if (!STy || STy->isLiteral())
+ return false;
+
+ return std::find(std::begin(specialTypes), std::end(specialTypes),
+ STy->getName()) != std::end(specialTypes);
+}
+
+// Lower incoming formal arguments: each IR argument is materialized either as
+// a constant (image/sampler params), UNDEFs (dead args), loads from the
+// "<fn>_param_<idx>" symbol (normal params), or a MoveParam node (byval
+// params). Fills InVals with one SDValue per entry of Ins.
+SDValue NVPTXTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const DataLayout &DL = DAG.getDataLayout();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ const Function *F = MF.getFunction();
+ const AttributeList &PAL = F->getAttributes();
+ const TargetLowering *TLI = STI.getTargetLowering();
+
+ SDValue Root = DAG.getRoot();
+ // NOTE(review): OutChains is never appended to in this function, so the
+ // TokenFactor at the end is currently dead code.
+ std::vector<SDValue> OutChains;
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ std::vector<Type *> argTypes;
+ std::vector<const Argument *> theArgs;
+ for (const Argument &I : F->args()) {
+ theArgs.push_back(&I);
+ argTypes.push_back(I.getType());
+ }
+ // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
+ // Ins.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Ins)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Ins.
+ // So a different index should be used for indexing into Ins.
+ // See similar issue in LowerCall.
+ unsigned InsIdx = 0;
+
+ int idx = 0;
+ for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
+ Type *Ty = argTypes[i];
+
+ // If the kernel argument is image*_t or sampler_t, convert it to
+ // a i32 constant holding the parameter position. This can later
+ // matched in the AsmPrinter to output the correct mangled name.
+ if (isImageOrSamplerVal(
+ theArgs[i],
+ (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
+ : nullptr))) {
+ assert(isKernelFunction(*F) &&
+ "Only kernels can have image/sampler params");
+ InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
+ continue;
+ }
+
+ if (theArgs[i]->use_empty()) {
+ // argument is dead
+ // Still emit one UNDEF per Ins entry so InVals stays in sync with Ins.
+ if (Ty->isAggregateType()) {
+ SmallVector<EVT, 16> vtparts;
+
+ ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
+ assert(vtparts.size() > 0 && "empty aggregate type not expected");
+ for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
+ ++parti) {
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ ++InsIdx;
+ }
+ // Undo the final increment; the loop header advances InsIdx again.
+ if (vtparts.size() > 0)
+ --InsIdx;
+ continue;
+ }
+ if (Ty->isVectorTy()) {
+ EVT ObjectVT = getValueType(DL, Ty);
+ unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
+ for (unsigned parti = 0; parti < NumRegs; ++parti) {
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ ++InsIdx;
+ }
+ if (NumRegs > 0)
+ --InsIdx;
+ continue;
+ }
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ continue;
+ }
+
+ // In the following cases, assign a node order of "idx+1"
+ // to newly created nodes. The SDNodes for params have to
+ // appear in the same order as their order of appearance
+ // in the original function. "idx+1" holds that order.
+ if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
+ bool aggregateIsPacked = false;
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ aggregateIsPacked = STy->isPacked();
+
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
+ assert(VTs.size() > 0 && "Unexpected empty type.");
+ auto VectorInfo =
+ VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
+
+ SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+ int VecIdx = -1; // Index of the first element of the current vector.
+ for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
+ if (VectorInfo[parti] & PVF_FIRST) {
+ assert(VecIdx == -1 && "Orphaned vector.");
+ VecIdx = parti;
+ }
+
+ // That's the last element of this store op.
+ if (VectorInfo[parti] & PVF_LAST) {
+ unsigned NumElts = parti - VecIdx + 1;
+ EVT EltVT = VTs[parti];
+ // i1 is loaded/stored as i8.
+ EVT LoadVT = EltVT;
+ if (EltVT == MVT::i1)
+ LoadVT = MVT::i8;
+ else if (EltVT == MVT::v2f16)
+ // getLoad needs a vector type, but it can't handle
+ // vectors which contain v2f16 elements. So we must load
+ // using i32 here and then bitcast back.
+ LoadVT = MVT::i32;
+
+ EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
+ SDValue VecAddr =
+ DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
+ DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
+ Value *srcValue = Constant::getNullValue(PointerType::get(
+ EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
+ // Param loads never alias stores and never change: mark them
+ // dereferenceable and invariant.
+ SDValue P =
+ DAG.getLoad(VecVT, dl, Root, VecAddr,
+ MachinePointerInfo(srcValue), aggregateIsPacked,
+ MachineMemOperand::MODereferenceable |
+ MachineMemOperand::MOInvariant);
+ if (P.getNode())
+ P.getNode()->setIROrder(idx + 1);
+ for (unsigned j = 0; j < NumElts; ++j) {
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
+ DAG.getIntPtrConstant(j, dl));
+ // We've loaded i1 as an i8 and now must truncate it back to i1
+ if (EltVT == MVT::i1)
+ Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
+ // v2f16 was loaded as an i32. Now we must bitcast it back.
+ else if (EltVT == MVT::v2f16)
+ Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
+ // Extend the element if necesary (e.g. an i8 is loaded
+ // into an i16 register)
+ if (Ins[InsIdx].VT.isInteger() &&
+ Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
+ unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND;
+ Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
+ }
+ InVals.push_back(Elt);
+ }
+
+ // Reset vector tracking state.
+ VecIdx = -1;
+ }
+ ++InsIdx;
+ }
+ if (VTs.size() > 0)
+ --InsIdx;
+ continue;
+ }
+
+ // Param has ByVal attribute
+ // Return MoveParam(param symbol).
+ // Ideally, the param symbol can be returned directly,
+ // but when SDNode builder decides to use it in a CopyToReg(),
+ // machine instruction fails because TargetExternalSymbol
+ // (not lowered) is target dependent, and CopyToReg assumes
+ // the source is lowered.
+ EVT ObjectVT = getValueType(DL, Ty);
+ assert(ObjectVT == Ins[InsIdx].VT &&
+ "Ins type did not match function type");
+ SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+ SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
+ if (p.getNode())
+ p.getNode()->setIROrder(idx + 1);
+ InVals.push_back(p);
+ }
+
+ // Clang will check explicit VarArg and issue error if any. However, Clang
+ // will let code with
+ // implicit var arg like f() pass. See bug 617733.
+ // We treat this case as if the arg list is empty.
+ // if (F.isVarArg()) {
+ // assert(0 && "VarArg not supported yet!");
+ //}
+
+ if (!OutChains.empty())
+ DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
+
+ return Chain;
+}
+
+// Lower a return: decompose the return type into PTX value types, vectorize
+// adjacent elements where possible, and emit StoreRetval/StoreRetvalV2/V4
+// nodes, finishing with RET_FLAG.
+SDValue
+NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ Type *RetTy = MF.getFunction()->getReturnType();
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ // NOTE(review): this copies the DataLayout by value; `const DataLayout &`
+ // would avoid the copy.
+ const DataLayout DL = DAG.getDataLayout();
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
+ assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
+
+ auto VectorInfo = VectorizePTXValueVTs(
+ VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+ // 32-bits are sign extended or zero extended, depending on whether
+ // they are signed or unsigned types.
+ bool ExtendIntegerRetVal =
+ RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+ SmallVector<SDValue, 6> StoreOperands;
+ for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+ // New load/store. Record chain and offset operands.
+ if (VectorInfo[i] & PVF_FIRST) {
+ assert(StoreOperands.empty() && "Orphaned operand list.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
+ }
+
+ SDValue RetVal = OutVals[i];
+ if (ExtendIntegerRetVal) {
+ RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND,
+ dl, MVT::i32, RetVal);
+ } else if (RetVal.getValueSizeInBits() < 16) {
+ // Use 16-bit registers for small load-stores as it's the
+ // smallest general purpose register size supported by NVPTX.
+ RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
+ }
+
+ // Record the value to return.
+ StoreOperands.push_back(RetVal);
+
+ // That's the last element of this store op.
+ if (VectorInfo[i] & PVF_LAST) {
+ NVPTXISD::NodeType Op;
+ // StoreOperands = {Chain, Offset, Val...}; subtract the two non-value
+ // operands to get the element count.
+ unsigned NumElts = StoreOperands.size() - 2;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::StoreRetval;
+ break;
+ case 2:
+ Op = NVPTXISD::StoreRetvalV2;
+ break;
+ case 4:
+ Op = NVPTXISD::StoreRetvalV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ // Adjust type of load/store op if we've extended the scalar
+ // return value.
+ EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
+ Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
+ StoreOperands, TheStoreType,
+ MachinePointerInfo(), 1);
+ // Cleanup vector state.
+ StoreOperands.clear();
+ }
+ }
+
+ return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
+}
+
+// Lower an inline-asm operand for the given constraint. NVPTX has no
+// multi-letter constraints, so those are ignored (Ops left untouched);
+// single-letter constraints are delegated to the generic implementation.
+void NVPTXTargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ // LLVM Coding Standards: no `else` after a `return`.
+ if (Constraint.length() > 1)
+ return;
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ return NVPTXISD::Tex1DFloatS32;
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloat;
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ return NVPTXISD::Tex1DS32S32;
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ return NVPTXISD::Tex1DS32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ return NVPTXISD::Tex1DU32S32;
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ return NVPTXISD::Tex1DU32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ return NVPTXISD::Tex1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ return NVPTXISD::Tex1DArrayS32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ return NVPTXISD::Tex1DArrayU32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ return NVPTXISD::Tex2DFloatS32;
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloat;
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ return NVPTXISD::Tex2DS32S32;
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ return NVPTXISD::Tex2DS32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ return NVPTXISD::Tex2DU32S32;
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ return NVPTXISD::Tex2DU32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ return NVPTXISD::Tex2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ return NVPTXISD::Tex2DArrayS32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ return NVPTXISD::Tex2DArrayU32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ return NVPTXISD::Tex3DFloatS32;
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloat;
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ return NVPTXISD::Tex3DS32S32;
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ return NVPTXISD::Tex3DS32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ return NVPTXISD::Tex3DU32S32;
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ return NVPTXISD::Tex3DU32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloat;
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ return NVPTXISD::TexCubeS32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ return NVPTXISD::TexCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ return NVPTXISD::TexCubeU32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ return NVPTXISD::TexCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4R2DFloatFloat;
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4G2DFloatFloat;
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4B2DFloatFloat;
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4A2DFloatFloat;
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4R2DS64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4G2DS64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4B2DS64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4A2DS64Float;
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4R2DU64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4G2DU64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4B2DU64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4A2DU64Float;
+
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ return NVPTXISD::TexUnified1DFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ return NVPTXISD::TexUnified1DS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ return NVPTXISD::TexUnified1DU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ return NVPTXISD::TexUnified1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ return NVPTXISD::TexUnified1DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ return NVPTXISD::TexUnified1DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ return NVPTXISD::TexUnified2DFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ return NVPTXISD::TexUnified2DS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ return NVPTXISD::TexUnified2DU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ return NVPTXISD::TexUnified2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ return NVPTXISD::TexUnified2DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ return NVPTXISD::TexUnified2DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ return NVPTXISD::TexUnified3DFloatS32;
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ return NVPTXISD::TexUnified3DS32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ return NVPTXISD::TexUnified3DU32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedR2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedG2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedB2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedA2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedR2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedG2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedB2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedA2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedR2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedG2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedB2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedA2DU64Float;
+ }
+}
+
+ // Map an NVVM surface-load ("suld") intrinsic ID to the matching NVPTXISD
+ // surface-load opcode, or return 0 if the intrinsic is not a surface load.
+ // The mapping is purely mechanical and one-to-one: every combination of
+ // geometry (1d / 1d_array / 2d / 2d_array / 3d), vector width (scalar / v2 /
+ // v4), element width (i8 / i16 / i32 / i64; v4i64 has no variant) and
+ // out-of-bounds suffix (clamp / trap / zero) has its own intrinsic and its
+ // own ISD node.
+ static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+
+ // ".clamp"-suffixed suld intrinsics.
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ return NVPTXISD::Suld1DI8Clamp;
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ return NVPTXISD::Suld1DI16Clamp;
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ return NVPTXISD::Suld1DI32Clamp;
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ return NVPTXISD::Suld1DI64Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ return NVPTXISD::Suld1DV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ return NVPTXISD::Suld1DV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ return NVPTXISD::Suld1DV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ return NVPTXISD::Suld1DV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ return NVPTXISD::Suld1DV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ return NVPTXISD::Suld1DV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ return NVPTXISD::Suld1DV4I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ return NVPTXISD::Suld1DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ return NVPTXISD::Suld1DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ return NVPTXISD::Suld1DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ return NVPTXISD::Suld1DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ return NVPTXISD::Suld1DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ return NVPTXISD::Suld1DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ return NVPTXISD::Suld1DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ return NVPTXISD::Suld1DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ return NVPTXISD::Suld1DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ return NVPTXISD::Suld1DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ return NVPTXISD::Suld1DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ return NVPTXISD::Suld2DI8Clamp;
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ return NVPTXISD::Suld2DI16Clamp;
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ return NVPTXISD::Suld2DI32Clamp;
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ return NVPTXISD::Suld2DI64Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ return NVPTXISD::Suld2DV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ return NVPTXISD::Suld2DV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ return NVPTXISD::Suld2DV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ return NVPTXISD::Suld2DV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ return NVPTXISD::Suld2DV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ return NVPTXISD::Suld2DV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ return NVPTXISD::Suld2DV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ return NVPTXISD::Suld2DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ return NVPTXISD::Suld2DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ return NVPTXISD::Suld2DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ return NVPTXISD::Suld2DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ return NVPTXISD::Suld2DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ return NVPTXISD::Suld2DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ return NVPTXISD::Suld2DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ return NVPTXISD::Suld2DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ return NVPTXISD::Suld2DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ return NVPTXISD::Suld2DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ return NVPTXISD::Suld2DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ return NVPTXISD::Suld3DI8Clamp;
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ return NVPTXISD::Suld3DI16Clamp;
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ return NVPTXISD::Suld3DI32Clamp;
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ return NVPTXISD::Suld3DI64Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ return NVPTXISD::Suld3DV2I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ return NVPTXISD::Suld3DV2I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ return NVPTXISD::Suld3DV2I32Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ return NVPTXISD::Suld3DV2I64Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ return NVPTXISD::Suld3DV4I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ return NVPTXISD::Suld3DV4I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ return NVPTXISD::Suld3DV4I32Clamp;
+
+ // ".trap"-suffixed suld intrinsics (same geometry/width grid as above).
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ return NVPTXISD::Suld1DI8Trap;
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ return NVPTXISD::Suld1DI16Trap;
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ return NVPTXISD::Suld1DI32Trap;
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ return NVPTXISD::Suld1DI64Trap;
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ return NVPTXISD::Suld1DV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ return NVPTXISD::Suld1DV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ return NVPTXISD::Suld1DV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ return NVPTXISD::Suld1DV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ return NVPTXISD::Suld1DV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ return NVPTXISD::Suld1DV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ return NVPTXISD::Suld1DV4I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ return NVPTXISD::Suld1DArrayI8Trap;
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ return NVPTXISD::Suld1DArrayI16Trap;
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ return NVPTXISD::Suld1DArrayI32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ return NVPTXISD::Suld1DArrayI64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ return NVPTXISD::Suld1DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ return NVPTXISD::Suld1DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ return NVPTXISD::Suld1DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ return NVPTXISD::Suld1DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ return NVPTXISD::Suld1DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ return NVPTXISD::Suld1DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ return NVPTXISD::Suld1DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ return NVPTXISD::Suld2DI8Trap;
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ return NVPTXISD::Suld2DI16Trap;
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ return NVPTXISD::Suld2DI32Trap;
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ return NVPTXISD::Suld2DI64Trap;
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ return NVPTXISD::Suld2DV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ return NVPTXISD::Suld2DV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ return NVPTXISD::Suld2DV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ return NVPTXISD::Suld2DV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ return NVPTXISD::Suld2DV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ return NVPTXISD::Suld2DV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ return NVPTXISD::Suld2DV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ return NVPTXISD::Suld2DArrayI8Trap;
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ return NVPTXISD::Suld2DArrayI16Trap;
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ return NVPTXISD::Suld2DArrayI32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ return NVPTXISD::Suld2DArrayI64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ return NVPTXISD::Suld2DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ return NVPTXISD::Suld2DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ return NVPTXISD::Suld2DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ return NVPTXISD::Suld2DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ return NVPTXISD::Suld2DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ return NVPTXISD::Suld2DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ return NVPTXISD::Suld2DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ return NVPTXISD::Suld3DI8Trap;
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ return NVPTXISD::Suld3DI16Trap;
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ return NVPTXISD::Suld3DI32Trap;
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ return NVPTXISD::Suld3DI64Trap;
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ return NVPTXISD::Suld3DV2I8Trap;
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ return NVPTXISD::Suld3DV2I16Trap;
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ return NVPTXISD::Suld3DV2I32Trap;
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ return NVPTXISD::Suld3DV2I64Trap;
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ return NVPTXISD::Suld3DV4I8Trap;
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ return NVPTXISD::Suld3DV4I16Trap;
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ return NVPTXISD::Suld3DV4I32Trap;
+
+ // ".zero"-suffixed suld intrinsics (same geometry/width grid as above).
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ return NVPTXISD::Suld1DI8Zero;
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ return NVPTXISD::Suld1DI16Zero;
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ return NVPTXISD::Suld1DI32Zero;
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ return NVPTXISD::Suld1DI64Zero;
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ return NVPTXISD::Suld1DV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ return NVPTXISD::Suld1DV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ return NVPTXISD::Suld1DV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ return NVPTXISD::Suld1DV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ return NVPTXISD::Suld1DV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ return NVPTXISD::Suld1DV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ return NVPTXISD::Suld1DV4I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ return NVPTXISD::Suld1DArrayI8Zero;
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ return NVPTXISD::Suld1DArrayI16Zero;
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ return NVPTXISD::Suld1DArrayI32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ return NVPTXISD::Suld1DArrayI64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ return NVPTXISD::Suld1DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ return NVPTXISD::Suld1DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ return NVPTXISD::Suld1DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ return NVPTXISD::Suld1DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ return NVPTXISD::Suld1DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ return NVPTXISD::Suld1DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ return NVPTXISD::Suld1DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ return NVPTXISD::Suld2DI8Zero;
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ return NVPTXISD::Suld2DI16Zero;
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ return NVPTXISD::Suld2DI32Zero;
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ return NVPTXISD::Suld2DI64Zero;
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ return NVPTXISD::Suld2DV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ return NVPTXISD::Suld2DV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ return NVPTXISD::Suld2DV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ return NVPTXISD::Suld2DV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ return NVPTXISD::Suld2DV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ return NVPTXISD::Suld2DV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ return NVPTXISD::Suld2DV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ return NVPTXISD::Suld2DArrayI8Zero;
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ return NVPTXISD::Suld2DArrayI16Zero;
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ return NVPTXISD::Suld2DArrayI32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ return NVPTXISD::Suld2DArrayI64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ return NVPTXISD::Suld2DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ return NVPTXISD::Suld2DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ return NVPTXISD::Suld2DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ return NVPTXISD::Suld2DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ return NVPTXISD::Suld2DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ return NVPTXISD::Suld2DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ return NVPTXISD::Suld2DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ return NVPTXISD::Suld3DI8Zero;
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ return NVPTXISD::Suld3DI16Zero;
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ return NVPTXISD::Suld3DI32Zero;
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ return NVPTXISD::Suld3DI64Zero;
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ return NVPTXISD::Suld3DV2I8Zero;
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ return NVPTXISD::Suld3DV2I16Zero;
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ return NVPTXISD::Suld3DV2I32Zero;
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ return NVPTXISD::Suld3DV2I64Zero;
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ return NVPTXISD::Suld3DV4I8Zero;
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ return NVPTXISD::Suld3DV4I16Zero;
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ return NVPTXISD::Suld3DV4I32Zero;
+ }
+}
+
+// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
+// TgtMemIntrinsic because we need the information that is only available in
+// the "Value" type of the destination pointer. In particular, the address
+// space information.
+bool NVPTXTargetLowering::getTgtMemIntrinsic(
+ IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ default:
+ return false;
+
+ case Intrinsic::nvvm_atomic_load_add_f32:
+ case Intrinsic::nvvm_atomic_load_inc_32:
+ case Intrinsic::nvvm_atomic_load_dec_32:
+
+ case Intrinsic::nvvm_atomic_add_gen_f_cta:
+ case Intrinsic::nvvm_atomic_add_gen_f_sys:
+ case Intrinsic::nvvm_atomic_add_gen_i_cta:
+ case Intrinsic::nvvm_atomic_add_gen_i_sys:
+ case Intrinsic::nvvm_atomic_and_gen_i_cta:
+ case Intrinsic::nvvm_atomic_and_gen_i_sys:
+ case Intrinsic::nvvm_atomic_cas_gen_i_cta:
+ case Intrinsic::nvvm_atomic_cas_gen_i_sys:
+ case Intrinsic::nvvm_atomic_dec_gen_i_cta:
+ case Intrinsic::nvvm_atomic_dec_gen_i_sys:
+ case Intrinsic::nvvm_atomic_inc_gen_i_cta:
+ case Intrinsic::nvvm_atomic_inc_gen_i_sys:
+ case Intrinsic::nvvm_atomic_max_gen_i_cta:
+ case Intrinsic::nvvm_atomic_max_gen_i_sys:
+ case Intrinsic::nvvm_atomic_min_gen_i_cta:
+ case Intrinsic::nvvm_atomic_min_gen_i_sys:
+ case Intrinsic::nvvm_atomic_or_gen_i_cta:
+ case Intrinsic::nvvm_atomic_or_gen_i_sys:
+ case Intrinsic::nvvm_atomic_exch_gen_i_cta:
+ case Intrinsic::nvvm_atomic_exch_gen_i_sys:
+ case Intrinsic::nvvm_atomic_xor_gen_i_cta:
+ case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = true;
+ Info.align = 0;
+ return true;
+ }
+
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if(Intrinsic == Intrinsic::nvvm_ldg_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4f32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i8;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i16;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i64;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ return false;
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented
+/// by AM is legal for this target, for a load/store of the specified type.
+/// Used to guide target specific optimizations, like loop strength reduction
+/// (LoopStrengthReduce.cpp) and memory optimization for address mode
+/// (CodeGenPrepare.cpp)
+bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM, Type *Ty,
+ unsigned AS) const {
+ // AddrMode - This represents an addressing mode of:
+ // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+ //
+ // The legal address modes are
+ // - [avar]
+ // - [areg]
+ // - [areg+immoff]
+ // - [immAddr]
+
+ if (AM.BaseGV) {
+ return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
+ }
+
+ switch (AM.Scale) {
+ case 0: // "r", "r+i" or "i" is allowed
+ break;
+ case 1:
+ if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
+ return false;
+ // Otherwise we have r+i.
+ break;
+ default:
+ // No scale > 1 is allowed
+ return false;
+ }
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+NVPTXTargetLowering::ConstraintType
+NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'b':
+ case 'r':
+ case 'h':
+ case 'c':
+ case 'l':
+ case 'f':
+ case 'd':
+ case '0':
+ case 'N':
+ return C_RegisterClass;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'b':
+ return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
+ case 'c':
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+ case 'h':
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+ case 'r':
+ return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
+ case 'l':
+ case 'N':
+ return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
+ case 'f':
+ return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
+ case 'd':
+ return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
+ }
+ }
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX DAG Combining
+//===----------------------------------------------------------------------===//
+
+bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
+ CodeGenOpt::Level OptLevel) const {
+ // Always honor command-line argument
+ if (FMAContractLevelOpt.getNumOccurrences() > 0)
+ return FMAContractLevelOpt > 0;
+
+ // Do not contract if we're not optimizing the code.
+ if (OptLevel == 0)
+ return false;
+
+ // Honor TargetOptions flags that explicitly say fusion is okay.
+ if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
+ return true;
+
+ return allowUnsafeFPMath(MF);
+}
+
+bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
+ // Honor TargetOptions flags that explicitly say unsafe math is okay.
+ if (MF.getTarget().Options.UnsafeFPMath)
+ return true;
+
+ // Allow unsafe math if unsafe-fp-math attribute explicitly says so.
+ const Function *F = MF.getFunction();
+ if (F->hasFnAttribute("unsafe-fp-math")) {
+ Attribute Attr = F->getFnAttribute("unsafe-fp-math");
+ StringRef Val = Attr.getValueAsString();
+ if (Val == "true")
+ return true;
+ }
+
+ return false;
+}
+
+/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
+/// operands N0 and N1. This is a helper for PerformADDCombine that is
+/// called with the default operands, and if that fails, with commuted
+/// operands.
+static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SelectionDAG &DAG = DCI.DAG;
+ // Skip non-integer, non-scalar case
+ EVT VT=N0.getValueType();
+ if (VT.isVector())
+ return SDValue();
+
+ // fold (add (mul a, b), c) -> (mad a, b, c)
+ //
+ if (N0.getOpcode() == ISD::MUL) {
+ assert (VT.isInteger());
+ // For integer:
+ // Since integer multiply-add costs the same as integer multiply
+ // but is more costly than integer add, do the fusion only when
+ // the mul is only used in the add.
+ if (OptLevel==CodeGenOpt::None || VT != MVT::i32 ||
+ !N0.getNode()->hasOneUse())
+ return SDValue();
+
+ // Do the folding
+ return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+ else if (N0.getOpcode() == ISD::FMUL) {
+ if (VT == MVT::f32 || VT == MVT::f64) {
+ const auto *TLI = static_cast<const NVPTXTargetLowering *>(
+ &DAG.getTargetLoweringInfo());
+ if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
+ return SDValue();
+
+ // For floating point:
+ // Do the fusion only when the mul has less than 5 uses and all
+ // are add.
+ // The heuristic is that if a use is not an add, then that use
+ // cannot be fused into fma, therefore mul is still needed anyway.
+ // If there are more than 4 uses, even if they are all add, fusing
+ // them will increase register pressue.
+ //
+ int numUses = 0;
+ int nonAddCount = 0;
+ for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
+ UE = N0.getNode()->use_end();
+ UI != UE; ++UI) {
+ numUses++;
+ SDNode *User = *UI;
+ if (User->getOpcode() != ISD::FADD)
+ ++nonAddCount;
+ }
+ if (numUses >= 5)
+ return SDValue();
+ if (nonAddCount) {
+ int orderNo = N->getIROrder();
+ int orderNo2 = N0.getNode()->getIROrder();
+ // simple heuristics here for considering potential register
+ // pressure, the logics here is that the differnce are used
+ // to measure the distance between def and use, the longer distance
+ // more likely cause register pressure.
+ if (orderNo - orderNo2 < 500)
+ return SDValue();
+
+ // Now, check if at least one of the FMUL's operands is live beyond the node N,
+ // which guarantees that the FMA will not increase register pressure at node N.
+ bool opIsLive = false;
+ const SDNode *left = N0.getOperand(0).getNode();
+ const SDNode *right = N0.getOperand(1).getNode();
+
+ if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
+ opIsLive = true;
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ return SDValue();
+ }
+
+ return DAG.getNode(ISD::FMA, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+ }
+
+ return SDValue();
+}
+
+/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
+///
+static SDValue PerformADDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+ // First try with the default operand order.
+ if (SDValue Result =
+ PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
+ return Result;
+
+ // If that didn't work, try again with the operands commuted.
+ return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
+}
+
+static SDValue PerformANDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // The type legalizer turns a vector load of i8 values into a zextload to i16
+ // registers, optionally ANY_EXTENDs it (if target type is integer),
+ // and ANDs off the high 8 bits. Since we turn this load into a
+ // target-specific DAG node, the DAG combiner fails to eliminate these AND
+ // nodes. Do that here.
+ SDValue Val = N->getOperand(0);
+ SDValue Mask = N->getOperand(1);
+
+ if (isa<ConstantSDNode>(Val)) {
+ std::swap(Val, Mask);
+ }
+
+ SDValue AExt;
+ // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
+ if (Val.getOpcode() == ISD::ANY_EXTEND) {
+ AExt = Val;
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->getOpcode() == NVPTXISD::LoadV2 ||
+ Val->getOpcode() == NVPTXISD::LoadV4) {
+ ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
+ if (!MaskCnst) {
+ // Not an AND with a constant
+ return SDValue();
+ }
+
+ uint64_t MaskVal = MaskCnst->getZExtValue();
+ if (MaskVal != 0xff) {
+ // Not an AND that chops off top 8 bits
+ return SDValue();
+ }
+
+ MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
+ if (!Mem) {
+ // Not a MemSDNode?!?
+ return SDValue();
+ }
+
+ EVT MemVT = Mem->getMemoryVT();
+ if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
+ // We only handle the i8 case
+ return SDValue();
+ }
+
+ unsigned ExtType =
+ cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
+ getZExtValue();
+ if (ExtType == ISD::SEXTLOAD) {
+ // If for some reason the load is a sextload, the and is needed to zero
+ // out the high 8 bits
+ return SDValue();
+ }
+
+ bool AddTo = false;
+ if (AExt.getNode() != nullptr) {
+ // Re-insert the ext as a zext.
+ Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
+ AExt.getValueType(), Val);
+ AddTo = true;
+ }
+
+ // If we get here, the AND is unnecessary. Just replace it with the load
+ DCI.CombineTo(N, Val, AddTo);
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformREMCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
+
+ // Don't do anything at less than -O2.
+ if (OptLevel < CodeGenOpt::Default)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ bool IsSigned = N->getOpcode() == ISD::SREM;
+ unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
+
+ const SDValue &Num = N->getOperand(0);
+ const SDValue &Den = N->getOperand(1);
+
+ for (const SDNode *U : Num->uses()) {
+ if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
+ U->getOperand(1) == Den) {
+ // Num % Den -> Num - (Num / Den) * Den
+ return DAG.getNode(ISD::SUB, DL, VT, Num,
+ DAG.getNode(ISD::MUL, DL, VT,
+ DAG.getNode(DivOpc, DL, VT, Num, Den),
+ Den));
+ }
+ }
+ return SDValue();
+}
+
+enum OperandSignedness {
+ Signed = 0,
+ Unsigned,
+ Unknown
+};
+
+/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
+/// that can be demoted to \p OptSize bits without loss of information. The
+/// signedness of the operand, if determinable, is placed in \p S.
+static bool IsMulWideOperandDemotable(SDValue Op,
+ unsigned OptSize,
+ OperandSignedness &S) {
+ S = Unknown;
+
+ if (Op.getOpcode() == ISD::SIGN_EXTEND ||
+ Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() <= OptSize) {
+ S = Signed;
+ return true;
+ }
+ } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() <= OptSize) {
+ S = Unsigned;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
+/// be demoted to \p OptSize bits without loss of information. If the operands
+/// contain a constant, it should appear as the RHS operand. The signedness of
+/// the operands is placed in \p IsSigned.
+static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
+ unsigned OptSize,
+ bool &IsSigned) {
+ OperandSignedness LHSSign;
+
+ // The LHS operand must be a demotable op
+ if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
+ return false;
+
+ // We should have been able to determine the signedness from the LHS
+ if (LHSSign == Unknown)
+ return false;
+
+ IsSigned = (LHSSign == Signed);
+
+ // The RHS can be a demotable op or a constant
+ if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
+ const APInt &Val = CI->getAPIntValue();
+ if (LHSSign == Unsigned) {
+ return Val.isIntN(OptSize);
+ } else {
+ return Val.isSignedIntN(OptSize);
+ }
+ } else {
+ OperandSignedness RHSSign;
+ if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
+ return false;
+
+ return LHSSign == RHSSign;
+ }
+}
+
+/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
+/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
+/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
+/// amount.
+static SDValue TryMULWIDECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT MulType = N->getValueType(0);
+ if (MulType != MVT::i32 && MulType != MVT::i64) {
+ return SDValue();
+ }
+
+ SDLoc DL(N);
+ unsigned OptSize = MulType.getSizeInBits() >> 1;
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ // Canonicalize the multiply so the constant (if any) is on the right
+ if (N->getOpcode() == ISD::MUL) {
+ if (isa<ConstantSDNode>(LHS)) {
+ std::swap(LHS, RHS);
+ }
+ }
+
+ // If we have a SHL, determine the actual multiply amount
+ if (N->getOpcode() == ISD::SHL) {
+ ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
+ if (!ShlRHS) {
+ return SDValue();
+ }
+
+ APInt ShiftAmt = ShlRHS->getAPIntValue();
+ unsigned BitWidth = MulType.getSizeInBits();
+ if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
+ APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
+ RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
+ } else {
+ return SDValue();
+ }
+ }
+
+ bool Signed;
+ // Verify that our operands are demotable
+ if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
+ return SDValue();
+ }
+
+ EVT DemotedVT;
+ if (MulType == MVT::i32) {
+ DemotedVT = MVT::i16;
+ } else {
+ DemotedVT = MVT::i32;
+ }
+
+ // Truncate the operands to the correct size. Note that these are just for
+ // type consistency and will (likely) be eliminated in later phases.
+ SDValue TruncLHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
+ SDValue TruncRHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
+
+ unsigned Opc;
+ if (Signed) {
+ Opc = NVPTXISD::MUL_WIDE_SIGNED;
+ } else {
+ Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
+ }
+
+ return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
+}
+
+/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
+static SDValue PerformMULCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
+static SDValue PerformSHLCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformSETCCCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT CCType = N->getValueType(0);
+ SDValue A = N->getOperand(0);
+ SDValue B = N->getOperand(1);
+
+ if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
+ return SDValue();
+
+ SDLoc DL(N);
+ // setp.f16x2 returns two scalar predicates, which we need to
+ // convert back to v2i1. The returned result will be scalarized by
+ // the legalizer, but the comparison will remain a single vector
+ // instruction.
+ SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
+ DCI.DAG.getVTList(MVT::i1, MVT::i1),
+ {A, B, N->getOperand(2)});
+ return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
+ CCNode.getValue(1));
+}
+
+SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::FADD:
+ return PerformADDCombine(N, DCI, STI, OptLevel);
+ case ISD::MUL:
+ return PerformMULCombine(N, DCI, OptLevel);
+ case ISD::SHL:
+ return PerformSHLCombine(N, DCI, OptLevel);
+ case ISD::AND:
+ return PerformANDCombine(N, DCI);
+ case ISD::UREM:
+ case ISD::SREM:
+ return PerformREMCombine(N, DCI, OptLevel);
+ case ISD::SETCC:
+ return PerformSETCCCombine(N, DCI);
+ }
+ return SDValue();
+}
+
+/// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
+static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ EVT ResVT = N->getValueType(0);
+ SDLoc DL(N);
+
+ assert(ResVT.isVector() && "Vector load must have vector type");
+
+ // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+ // legal. We can (and should) split that into 2 loads of <2 x double> here
+ // but I'm leaving that as a TODO for now.
+ assert(ResVT.isSimple() && "Can only handle simple types");
+ switch (ResVT.getSimpleVT().SimpleTy) {
+ default:
+ return;
+ case MVT::v2i8:
+ case MVT::v2i16:
+ case MVT::v2i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v2f32:
+ case MVT::v2f64:
+ case MVT::v4i8:
+ case MVT::v4i16:
+ case MVT::v4i32:
+ case MVT::v4f16:
+ case MVT::v4f32:
+ case MVT::v8f16: // <4 x f16x2>
+ // This is a "native" vector type
+ break;
+ }
+
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+
+ unsigned Align = LD->getAlignment();
+ auto &TD = DAG.getDataLayout();
+ unsigned PrefAlign =
+ TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This load is not sufficiently aligned, so bail out and let this vector
+ // load be scalarized. Note that we may still be able to emit smaller
+ // vector loads. For example, if we are loading a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return;
+ }
+
+ EVT EltVT = ResVT.getVectorElementType();
+ unsigned NumElts = ResVT.getVectorNumElements();
+
+ // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+ bool LoadF16x2 = false;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ Opcode = NVPTXISD::LoadV2;
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ case 8: {
+ // v8f16 is a special case. PTX doesn't have ld.v8.f16
+ // instruction. Instead, we split the vector into v2f16 chunks and
+ // load them with ld.v4.b32.
+ assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
+ LoadF16x2 = true;
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
+ MVT::Other};
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ // Copy regular operands
+ SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
+
+ // The select routine does not have access to the LoadSDNode instance, so
+ // pass along the extension information
+ OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ LD->getMemoryVT(),
+ LD->getMemOperand());
+
+ SmallVector<SDValue, 8> ScalarRes;
+ if (LoadF16x2) {
+ // Split v2f16 subvectors back into individual elements.
+ NumElts /= 2;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue SubVector = NewLD.getValue(i);
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(0, DL));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(1, DL));
+ ScalarRes.push_back(E0);
+ ScalarRes.push_back(E1);
+ }
+ } else {
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+}
+
+static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ SDValue Chain = N->getOperand(0);
+ SDValue Intrin = N->getOperand(1);
+ SDLoc DL(N);
+
+ // Get the intrinsic ID
+ unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ EVT ResVT = N->getValueType(0);
+
+ if (ResVT.isVector()) {
+ // Vector LDG/LDU
+
+ unsigned NumElts = ResVT.getVectorNumElements();
+ EVT EltVT = ResVT.getVectorElementType();
+
+ // Since LDU/LDG are target nodes, we cannot rely on DAG type
+ // legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV2;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV2;
+ break;
+ }
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV4;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV4;
+ break;
+ }
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ SmallVector<SDValue, 8> OtherOps;
+
+ // Copy regular operands
+
+ OtherOps.push_back(Chain); // Chain
+ // Skip operand 1 (intrinsic ID)
+ // Others
+ OtherOps.append(N->op_begin() + 2, N->op_end());
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ MemSD->getMemoryVT(),
+ MemSD->getMemOperand());
+
+ SmallVector<SDValue, 4> ScalarRes;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res =
+ DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec =
+ DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+ } else {
+ // i8 LDG/LDU
+ assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
+ "Custom handling of non-i8 ldu/ldg?");
+
+ // Just copy all operands as-is
+ SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
+
+ // Force output to i16
+ SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ // We make sure the memory type is i8, which will be used during isel
+ // to select the proper instruction.
+ SDValue NewLD =
+ DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
+ MVT::i8, MemSD->getMemOperand());
+
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
+ NewLD.getValue(0)));
+ Results.push_back(NewLD.getValue(1));
+ }
+ }
+ }
+}
+
+void NVPTXTargetLowering::ReplaceNodeResults(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ default:
+ report_fatal_error("Unhandled custom legalization");
+ case ISD::LOAD:
+ ReplaceLoadVector(N, DAG, Results);
+ return;
+ case ISD::INTRINSIC_W_CHAIN:
+ ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
+ return;
+ }
+}
+
+// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
+void NVPTXSection::anchor() {}
+
+NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
+ delete static_cast<NVPTXSection *>(TextSection);
+ delete static_cast<NVPTXSection *>(DataSection);
+ delete static_cast<NVPTXSection *>(BSSSection);
+ delete static_cast<NVPTXSection *>(ReadOnlySection);
+
+ delete static_cast<NVPTXSection *>(StaticCtorSection);
+ delete static_cast<NVPTXSection *>(StaticDtorSection);
+ delete static_cast<NVPTXSection *>(LSDASection);
+ delete static_cast<NVPTXSection *>(EHFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfAbbrevSection);
+ delete static_cast<NVPTXSection *>(DwarfInfoSection);
+ delete static_cast<NVPTXSection *>(DwarfLineSection);
+ delete static_cast<NVPTXSection *>(DwarfFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfPubTypesSection);
+ delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);
+ delete static_cast<NVPTXSection *>(DwarfStrSection);
+ delete static_cast<NVPTXSection *>(DwarfLocSection);
+ delete static_cast<NVPTXSection *>(DwarfARangesSection);
+ delete static_cast<NVPTXSection *>(DwarfRangesSection);
+ delete static_cast<NVPTXSection *>(DwarfMacinfoSection);
+}
+
+MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
+ const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
+ return getDataSection();
+}
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.td b/lib/Target/NVPTX/NVPTXInstrInfo.td
index 2b847414b8a8..9378b29a9d0e 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -1,3165 +1,3164 @@
-//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the PTX instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-include "NVPTXInstrFormats.td"
-
-// A NOP instruction
-let hasSideEffects = 0 in {
- def NOP : NVPTXInst<(outs), (ins), "", []>;
-}
-
-let OperandType = "OPERAND_IMMEDIATE" in {
- def f16imm : Operand<f16>;
-}
-
-// List of vector specific properties
-def isVecLD : VecInstTypeEnum<1>;
-def isVecST : VecInstTypeEnum<2>;
-def isVecBuild : VecInstTypeEnum<3>;
-def isVecShuffle : VecInstTypeEnum<4>;
-def isVecExtract : VecInstTypeEnum<5>;
-def isVecInsert : VecInstTypeEnum<6>;
-def isVecDest : VecInstTypeEnum<7>;
-def isVecOther : VecInstTypeEnum<15>;
-
-//===----------------------------------------------------------------------===//
-// NVPTX Operand Definitions.
-//===----------------------------------------------------------------------===//
-
-def brtarget : Operand<OtherVT>;
-
-// CVT conversion modes
-// These must match the enum in NVPTX.h
-def CvtNONE : PatLeaf<(i32 0x0)>;
-def CvtRNI : PatLeaf<(i32 0x1)>;
-def CvtRZI : PatLeaf<(i32 0x2)>;
-def CvtRMI : PatLeaf<(i32 0x3)>;
-def CvtRPI : PatLeaf<(i32 0x4)>;
-def CvtRN : PatLeaf<(i32 0x5)>;
-def CvtRZ : PatLeaf<(i32 0x6)>;
-def CvtRM : PatLeaf<(i32 0x7)>;
-def CvtRP : PatLeaf<(i32 0x8)>;
-
-def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
-def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
-def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
-def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
-def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
-def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
-def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
-def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
-def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
-
-def CvtSAT : PatLeaf<(i32 0x20)>;
-def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
-
-def CvtMode : Operand<i32> {
- let PrintMethod = "printCvtMode";
-}
-
-// Compare modes
-// These must match the enum in NVPTX.h
-def CmpEQ : PatLeaf<(i32 0)>;
-def CmpNE : PatLeaf<(i32 1)>;
-def CmpLT : PatLeaf<(i32 2)>;
-def CmpLE : PatLeaf<(i32 3)>;
-def CmpGT : PatLeaf<(i32 4)>;
-def CmpGE : PatLeaf<(i32 5)>;
-def CmpEQU : PatLeaf<(i32 10)>;
-def CmpNEU : PatLeaf<(i32 11)>;
-def CmpLTU : PatLeaf<(i32 12)>;
-def CmpLEU : PatLeaf<(i32 13)>;
-def CmpGTU : PatLeaf<(i32 14)>;
-def CmpGEU : PatLeaf<(i32 15)>;
-def CmpNUM : PatLeaf<(i32 16)>;
-def CmpNAN : PatLeaf<(i32 17)>;
-
-def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
-def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
-def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
-def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
-def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
-def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
-def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
-def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
-def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
-def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
-def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
-def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
-def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
-def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
-
-def CmpMode : Operand<i32> {
- let PrintMethod = "printCmpMode";
-}
-def VecElement : Operand<i32> {
- let PrintMethod = "printVecElement";
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Instruction Predicate Definitions
-//===----------------------------------------------------------------------===//
-
-
-def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
-def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
-def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
-def useAtomRedG32forGen32 :
- Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
-def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
-def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
-def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
-def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
-def useAtomRedG64forGen64 :
- Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
-def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
-def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
-def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
-def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
-def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
-def hasVote : Predicate<"Subtarget->hasVote()">;
-def hasDouble : Predicate<"Subtarget->hasDouble()">;
-def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
-def hasLDG : Predicate<"Subtarget->hasLDG()">;
-def hasLDU : Predicate<"Subtarget->hasLDU()">;
-def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
-
-def doF32FTZ : Predicate<"useF32FTZ()">;
-def doNoF32FTZ : Predicate<"!useF32FTZ()">;
-
-def doMulWide : Predicate<"doMulWide">;
-
-def allowFMA : Predicate<"allowFMA()">;
-def noFMA : Predicate<"!allowFMA()">;
-def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
-
-def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
-def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
-
-def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
-def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
-
-def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
-def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
-
-def true : Predicate<"true">;
-
-def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
-
-def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
-
-//===----------------------------------------------------------------------===//
-// Some Common Instruction Class Templates
-//===----------------------------------------------------------------------===//
-
-// Template for instructions which take three int64, int32, or int16 args.
-// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
-multiclass I3<string OpcStr, SDNode OpNode> {
- def i64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def i64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def i16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def i16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
-}
-
-// Template for instructions which take 3 int32 args. The instructions are
-// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
-multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
-}
-
-// Template for instructions which take three fp64 or fp32 args. The
-// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
-//
-// Also defines ftz (flush subnormal inputs and results to sign-preserving
-// zero) variants for fp32 functions.
-//
-// This multiclass should be used for nodes that cannot be folded into FMAs.
-// For nodes that can be folded into FMAs (i.e. adds and muls), use
-// F3_fma_component.
-multiclass F3<string OpcStr, SDNode OpNode> {
- def f64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
- def f64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
- def f32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ]>;
- def f32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ]>;
- def f32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
- def f32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
-}
-
-// Template for instructions which take three FP args. The
-// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
-//
-// Also defines ftz (flush subnormal inputs and results to sign-preserving
-// zero) variants for fp32/fp16 functions.
-//
-// This multiclass should be used for nodes that can be folded to make fma ops.
-// In this case, we use the ".rn" variant when FMA is disabled, as this behaves
-// just like the non ".rn" op, but prevents ptxas from creating FMAs.
-multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
- def f64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[allowFMA]>;
- def f64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
- def f32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA, doF32FTZ]>;
- def f32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[allowFMA]>;
- def f32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[allowFMA]>;
-
- def f16rr_ftz :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;
- def f16rr :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, allowFMA]>;
-
- def f16x2rr_ftz :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;
- def f16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, allowFMA]>;
-
- // These have strange names so we don't perturb existing mir tests.
- def _rnf64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
- Requires<[noFMA]>;
- def _rnf64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
- def _rnf32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def _rnf32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA, doF32FTZ]>;
- def _rnf32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[noFMA]>;
- def _rnf32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
- Requires<[noFMA]>;
- def _rnf16rr_ftz :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, noFMA, doF32FTZ]>;
- def _rnf16rr :
- NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
- Requires<[useFP16Math, noFMA]>;
- def _rnf16x2rr_ftz :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, noFMA, doF32FTZ]>;
- def _rnf16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b),
- !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
- Requires<[useFP16Math, noFMA]>;
-}
-
-// Template for operations which take two f32 or f64 operands. Provides three
-// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
-// subnormal inputs and results to zero).
-multiclass F2<string OpcStr, SDNode OpNode> {
- def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
- !strconcat(OpcStr, ".f64 \t$dst, $a;"),
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
- def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
- Requires<[doF32FTZ]>;
- def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
- !strconcat(OpcStr, ".f32 \t$dst, $a;"),
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX Instructions.
-//===----------------------------------------------------------------------===//
-
-//-----------------------------------
-// Type Conversion
-//-----------------------------------
-
-let hasSideEffects = 0 in {
- // Generate a cvt to the given type from all possible types. Each instance
- // takes a CvtMode immediate that defines the conversion mode to use. It can
- // be CvtNONE to omit a conversion mode.
- multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
- def _s8 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s8 \t$dst, $src;"), []>;
- def _u8 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u8 \t$dst, $src;"), []>;
- def _s16 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s16 \t$dst, $src;"), []>;
- def _u16 :
- NVPTXInst<(outs RC:$dst),
- (ins Int16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u16 \t$dst, $src;"), []>;
- def _s32 :
- NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s32 \t$dst, $src;"), []>;
- def _u32 :
- NVPTXInst<(outs RC:$dst),
- (ins Int32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u32 \t$dst, $src;"), []>;
- def _s64 :
- NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".s64 \t$dst, $src;"), []>;
- def _u64 :
- NVPTXInst<(outs RC:$dst),
- (ins Int64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".u64 \t$dst, $src;"), []>;
- def _f16 :
- NVPTXInst<(outs RC:$dst),
- (ins Float16Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f16 \t$dst, $src;"), []>;
- def _f32 :
- NVPTXInst<(outs RC:$dst),
- (ins Float32Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f32 \t$dst, $src;"), []>;
- def _f64 :
- NVPTXInst<(outs RC:$dst),
- (ins Float64Regs:$src, CvtMode:$mode),
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
- FromName, ".f64 \t$dst, $src;"), []>;
- }
-
- // Generate cvts from all types to all types.
- defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
- defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
- defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
- defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
- defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
- defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
- defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
- defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
- defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
- defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
- defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
-
- // These cvts are different from those above: The source and dest registers
- // are of the same type.
- def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.s16.s8 \t$dst, $src;", []>;
- def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s8 \t$dst, $src;", []>;
- def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "cvt.s32.s16 \t$dst, $src;", []>;
- def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s8 \t$dst, $src;", []>;
- def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s16 \t$dst, $src;", []>;
- def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "cvt.s64.s32 \t$dst, $src;", []>;
-}
-
-//-----------------------------------
-// Integer Arithmetic
-//-----------------------------------
-
-// Template for xor masquerading as int1 arithmetic.
-multiclass ADD_SUB_i1<SDNode OpNode> {
- def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
- def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- "xor.pred \t$dst, $a, $b;",
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
-}
-
-// int1 addition and subtraction are both just xor.
-defm ADD_i1 : ADD_SUB_i1<add>;
-defm SUB_i1 : ADD_SUB_i1<sub>;
-
-// int16, int32, and int64 signed addition. Since nvptx is 2's complement, we
-// also use these for unsigned arithmetic.
-defm ADD : I3<"add.s", add>;
-defm SUB : I3<"sub.s", sub>;
-
-// int32 addition and subtraction with carry-out.
-// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
-defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
-defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
-
-// int32 addition and subtraction with carry-in and carry-out.
-defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
-defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
-
-defm MULT : I3<"mul.lo.s", mul>;
-
-defm MULTHS : I3<"mul.hi.s", mulhs>;
-defm MULTHU : I3<"mul.hi.u", mulhu>;
-
-defm SDIV : I3<"div.s", sdiv>;
-defm UDIV : I3<"div.u", udiv>;
-
-// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
-// will lower it.
-defm SREM : I3<"rem.s", srem>;
-defm UREM : I3<"rem.u", urem>;
-
-// Integer absolute value. NumBits should be one minus the bit width of RC.
-// This idiom implements the algorithm at
-// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.
-multiclass ABS<RegisterClass RC, int NumBits, string SizeName> {
- def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
- !strconcat("abs", SizeName, " \t$dst, $a;"),
- [(set RC:$dst, (xor (add (sra RC:$a, (i32 NumBits)), RC:$a),
- (sra RC:$a, (i32 NumBits))))]>;
-}
-defm ABS_16 : ABS<Int16Regs, 15, ".s16">;
-defm ABS_32 : ABS<Int32Regs, 31, ".s32">;
-defm ABS_64 : ABS<Int64Regs, 63, ".s64">;
-
-// Integer min/max.
-defm SMAX : I3<"max.s", smax>;
-defm UMAX : I3<"max.u", umax>;
-defm SMIN : I3<"min.s", smin>;
-defm UMIN : I3<"min.u", umin>;
-
-//
-// Wide multiplication
-//
-def MULWIDES64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-def MULWIDES64Imm64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.s32 \t$dst, $a, $b;", []>;
-
-def MULWIDEU64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-def MULWIDEU64Imm64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
- "mul.wide.u32 \t$dst, $a, $b;", []>;
-
-def MULWIDES32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-def MULWIDES32Imm32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.s16 \t$dst, $a, $b;", []>;
-
-def MULWIDEU32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-def MULWIDEU32Imm32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- "mul.wide.u16 \t$dst, $a, $b;", []>;
-
-def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
-def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
-def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
-
-// Matchers for signed, unsigned mul.wide ISD nodes.
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
- (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
- (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
- (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
- (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
- Requires<[doMulWide]>;
-
-// Predicates used for converting some patterns to mul.wide.
-def SInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(32);
-}]>;
-
-def UInt32Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(32);
-}]>;
-
-def SInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isSignedIntN(16);
-}]>;
-
-def UInt16Const : PatLeaf<(imm), [{
- const APInt &v = N->getAPIntValue();
- return v.isIntN(16);
-}]>;
-
-def Int5Const : PatLeaf<(imm), [{
- // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(32);
-}]>;
-
-def Int4Const : PatLeaf<(imm), [{
- // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
- const APInt &v = N->getAPIntValue();
- return v.sge(0) && v.slt(16);
-}]>;
-
-def SHL2MUL32 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(32, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
-}]>;
-
-def SHL2MUL16 : SDNodeXForm<imm, [{
- const APInt &v = N->getAPIntValue();
- APInt temp(16, 1);
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
-}]>;
-
-// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
-def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
- (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
-def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
- (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
- (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
-def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
- (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
- Requires<[doMulWide]>;
-
-// Convert "sign/zero-extend then multiply" to mul.wide.
-def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
- (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
- (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
- (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
- Requires<[doMulWide]>;
-
-def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
- Requires<[doMulWide]>;
-def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
- (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
- Requires<[doMulWide]>;
-
-//
-// Integer multiply-add
-//
-def SDTIMAD :
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
- SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
-def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
-
-def MAD16rrr :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
-def MAD16rri :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
-def MAD16rir :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
-def MAD16rii :
- NVPTXInst<(outs Int16Regs:$dst),
- (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
- "mad.lo.s16 \t$dst, $a, $b, $c;",
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD32rrr :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
-def MAD32rri :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
-def MAD32rir :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
-def MAD32rii :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
- "mad.lo.s32 \t$dst, $a, $b, $c;",
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
-
-def MAD64rrr :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
-def MAD64rri :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
-def MAD64rir :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
-def MAD64rii :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
- "mad.lo.s64 \t$dst, $a, $b, $c;",
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
-
-def INEG16 :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "neg.s16 \t$dst, $src;",
- [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
-def INEG32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "neg.s32 \t$dst, $src;",
- [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
-def INEG64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "neg.s64 \t$dst, $src;",
- [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
-
-//-----------------------------------
-// Floating Point Arithmetic
-//-----------------------------------
-
-// Constant 1.0f
-def FloatConst1 : PatLeaf<(fpimm), [{
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
- N->getValueAPF().convertToFloat() == 1.0f;
-}]>;
-// Constant 1.0 (double)
-def DoubleConst1 : PatLeaf<(fpimm), [{
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
- N->getValueAPF().convertToDouble() == 1.0;
-}]>;
-
-// Loads FP16 constant into a register.
-//
-// ptxas does not have hex representation for fp16, so we can't use
-// fp16 immediate values in .f16 instructions. Instead we have to load
-// the constant into a register using mov.b16.
-def LOAD_CONST_F16 :
- NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
- "mov.b16 \t$dst, $a;", []>;
-
-defm FADD : F3_fma_component<"add", fadd>;
-defm FSUB : F3_fma_component<"sub", fsub>;
-defm FMUL : F3_fma_component<"mul", fmul>;
-
-defm FMIN : F3<"min", fminnum>;
-defm FMAX : F3<"max", fmaxnum>;
-
-defm FABS : F2<"abs", fabs>;
-defm FNEG : F2<"neg", fneg>;
-defm FSQRT : F2<"sqrt.rn", fsqrt>;
-
-//
-// F64 division
-//
-def FDIV641r :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins f64imm:$a, Float64Regs:$b),
- "rcp.rn.f64 \t$dst, $b;",
- [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
-def FDIV64rr :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, Float64Regs:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
-def FDIV64ri :
- NVPTXInst<(outs Float64Regs:$dst),
- (ins Float64Regs:$a, f64imm:$b),
- "div.rn.f64 \t$dst, $a, $b;",
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
-
-//
-// F32 Approximate reciprocal
-//
-def FDIV321r_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV321r :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-//
-// F32 Approximate division
-//
-def FDIV32approxrr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;
-def FDIV32approxrr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-def FDIV32approxri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.approx.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_APPROX]>;
-//
-// F32 Semi-accurate reciprocal
-//
-// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
-//
-def FDIV321r_approx_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV321r_approx :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.approx.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-//
-// F32 Semi-accurate division
-//
-def FDIV32rr_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32ri_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL, doF32FTZ]>;
-def FDIV32rr :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-def FDIV32ri :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.full.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[do_DIVF32_FULL]>;
-//
-// F32 Accurate reciprocal
-//
-def FDIV321r_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.ftz.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20, doF32FTZ]>;
-def FDIV321r_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins f32imm:$a, Float32Regs:$b),
- "rcp.rn.f32 \t$dst, $b;",
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
-//
-// F32 Accurate division
-//
-def FDIV32rr_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32ri_prec_ftz :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.ftz.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[doF32FTZ, reqPTX20]>;
-def FDIV32rr_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, Float32Regs:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
- Requires<[reqPTX20]>;
-def FDIV32ri_prec :
- NVPTXInst<(outs Float32Regs:$dst),
- (ins Float32Regs:$a, f32imm:$b),
- "div.rn.f32 \t$dst, $a, $b;",
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
- Requires<[reqPTX20]>;
-
-//
-// FMA
-//
-
-multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
- Requires<[Pred]>;
- def rri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, ImmCls:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
- Requires<[Pred]>;
- def rir : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
- Requires<[Pred]>;
- def rii : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, ImmCls:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
- Requires<[Pred]>;
-}
-
-multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
- Requires<[useFP16Math, Pred]>;
-}
-
-defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
-defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;
-defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
-defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
-defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
-defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
-defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
-
-// sin/cos
-def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "sin.approx.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
- Requires<[allowUnsafeFPMath]>;
-def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "cos.approx.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
- Requires<[allowUnsafeFPMath]>;
-
-// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
-// i.e. "poor man's fmod()"
-
-// frem - f32 FTZ
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
- (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
- Float32Regs:$y))>,
- Requires<[doF32FTZ]>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
- (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
- fpimm:$y))>,
- Requires<[doF32FTZ]>;
-
-// frem - f32
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
- (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
- (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
- Float32Regs:$y))>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
- (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
- (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
- fpimm:$y))>;
-
-// frem - f64
-def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
- (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
- (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
- Float64Regs:$y))>;
-def : Pat<(frem Float64Regs:$x, fpimm:$y),
- (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
- (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
- fpimm:$y))>;
-
-//-----------------------------------
-// Bitwise operations
-//-----------------------------------
-
-// Template for three-arg bitwise operations. Takes three args, Creates .b16,
-// .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of OpcStr.
-multiclass BITWISE<string OpcStr, SDNode OpNode> {
- def b1rr :
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
- def b1ri :
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
- def b16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
- def b16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
- def b32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def b32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
- def b64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
- def b64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
-}
-
-defm OR : BITWISE<"or", or>;
-defm AND : BITWISE<"and", and>;
-defm XOR : BITWISE<"xor", xor>;
-
-def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
- "not.pred \t$dst, $src;",
- [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
-def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "not.b16 \t$dst, $src;",
- [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
-def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
- "not.b32 \t$dst, $src;",
- [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
-def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
- "not.b64 \t$dst, $src;",
- [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
-
-// Template for left/right shifts. Takes three operands,
-// [dest (reg), src (reg), shift (reg or imm)].
-// dest and src may be int64, int32, or int16, but shift is always int32.
-//
-// This template also defines a 32-bit shift (imm, imm) instruction.
-multiclass SHIFT<string OpcStr, SDNode OpNode> {
- def i64rr :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
- def i64ri :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
- def i32rr :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
- def i32ri :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
- def i32ii :
- NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
- [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
- def i16rr :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
- def i16ri :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
-}
-
-defm SHL : SHIFT<"shl.b", shl>;
-defm SRA : SHIFT<"shr.s", sra>;
-defm SRL : SHIFT<"shr.u", srl>;
-
-// Bit-reverse
-def BREV32 :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
- "brev.b32 \t$dst, $a;",
- [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
-def BREV64 :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
- "brev.b64 \t$dst, $a;",
- [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
-
-//
-// Rotate: Use ptx shf instruction if available.
-//
-
-// 32 bit r2 = rotl r1, n
-// =>
-// r2 = shf.l r1, r1, n
-def ROTL32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTL32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
-
-// 32 bit r2 = rotr r1, n
-// =>
-// r2 = shf.r r1, r1, n
-def ROTR32imm_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
- Requires<[hasHWROT32]>;
-
-def ROTR32reg_hw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[hasHWROT32]>;
-
-// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
-def ROT32imm_sw :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- "shl.b32 \t%lhs, $src, $amt1;\n\t"
- "shr.b32 \t%rhs, $src, $amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_32 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
- Requires<[noHWROT32]>;
-def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
- (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate left by register.
-def ROTL32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shl.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shr.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-// 32-bit software rotate right by register.
-def ROTR32reg_sw :
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b32 %lhs;\n\t"
- ".reg .b32 %rhs;\n\t"
- ".reg .b32 %amt2;\n\t"
- "shr.b32 \t%lhs, $src, $amt;\n\t"
- "sub.s32 \t%amt2, 32, $amt;\n\t"
- "shl.b32 \t%rhs, $src, %amt2;\n\t"
- "add.u32 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
- Requires<[noHWROT32]>;
-
-// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
-def ROT64imm_sw :
- NVPTXInst<(outs Int64Regs:$dst),
- (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- "shl.b64 \t%lhs, $src, $amt1;\n\t"
- "shr.b64 \t%rhs, $src, $amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- []>;
-
-def SUB_FRM_64 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
-}]>;
-
-def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
-def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
- (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
-
-// 64-bit software rotate left by register.
-def ROTL64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "shl.b64 \t%lhs, $src, $amt;\n\t"
- "sub.u32 \t%amt2, 64, $amt;\n\t"
- "shr.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
-
-def ROTR64reg_sw :
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
- "{{\n\t"
- ".reg .b64 %lhs;\n\t"
- ".reg .b64 %rhs;\n\t"
- ".reg .u32 %amt2;\n\t"
- "shr.b64 \t%lhs, $src, $amt;\n\t"
- "sub.u32 \t%amt2, 64, $amt;\n\t"
- "shl.b64 \t%rhs, $src, %amt2;\n\t"
- "add.u64 \t$dst, %lhs, %rhs;\n\t"
- "}}",
- [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
-
-//
-// Funnnel shift in clamp mode
-//
-
-// Create SDNodes so they can be used in the DAG code, e.g.
-// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
-def SDTIntShiftDOp :
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
- SDTCisInt<0>, SDTCisInt<3>]>;
-def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
-def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
-
-def FUNSHFLCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
-
-def FUNSHFRCLAMP :
- NVPTXInst<(outs Int32Regs:$dst),
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
- "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
- [(set Int32Regs:$dst,
- (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
-
-//
-// BFE - bit-field extract
-//
-
-// Template for BFE instructions. Takes four args,
-// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
-// Start may be an imm only if end is also an imm. FIXME: Is this a
-// restriction in PTX?
-//
-// dest and src may be int32 or int64, but start and end are always int32.
-multiclass BFE<string TyStr, RegisterClass RC> {
- def rrr
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
- def rri
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, Int32Regs:$b, i32imm:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
- def rii
- : NVPTXInst<(outs RC:$d),
- (ins RC:$a, i32imm:$b, i32imm:$c),
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
-}
-
-let hasSideEffects = 0 in {
- defm BFE_S32 : BFE<"s32", Int32Regs>;
- defm BFE_U32 : BFE<"u32", Int32Regs>;
- defm BFE_S64 : BFE<"s64", Int64Regs>;
- defm BFE_U64 : BFE<"u64", Int64Regs>;
-}
-
-//-----------------------------------
-// Comparison instructions (setp, set)
-//-----------------------------------
-
-// FIXME: This doesn't cover versions of set and setp that combine with a
-// boolean predicate, e.g. setp.eq.and.b16.
-
-let hasSideEffects = 0 in {
- multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr :
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- def ri :
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- def ir :
- NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
- " \t$dst, $a, $b;"), []>;
- }
-}
-
-defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
-defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
-defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
-defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
-defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
-defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
-defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
-defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
-defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
-defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
-defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
-def SETP_f16rr :
- NVPTXInst<(outs Int1Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
- "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
- []>, Requires<[useFP16Math]>;
-
-def SETP_f16x2rr :
- NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
- (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
- "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
- []>,
- Requires<[useFP16Math]>;
-
-
-// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
-// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
-// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
-
-let hasSideEffects = 0 in {
- multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- def ri : NVPTXInst<(outs Int32Regs:$dst),
- (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- def ir : NVPTXInst<(outs Int32Regs:$dst),
- (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
- }
-}
-
-defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
-defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
-defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
-defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
-defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
-defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
-defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
-defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
-defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
-defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
-defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
-defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
-
-//-----------------------------------
-// Selection instructions (selp)
-//-----------------------------------
-
-// FIXME: Missing slct
-
-// selp instructions that don't have any pattern matches; we explicitly use
-// them within this file.
-let hasSideEffects = 0 in {
- multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
- def rr : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ri : NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ir : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- def ii : NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
- }
-
- multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
- SDNode ImmNode> {
- def rr :
- NVPTXInst<(outs RC:$dst),
- (ins RC:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
- def ri :
- NVPTXInst<(outs RC:$dst),
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
- def ir :
- NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
- def ii :
- NVPTXInst<(outs RC:$dst),
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
- }
-}
-
-// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
-// good.
-defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
-defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
-defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
-defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
-defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
-defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
-defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
-defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
-defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
-defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
-defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
-defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
-
-def SELP_f16x2rr :
- NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
- "selp.b32 \t$dst, $a, $b, $p;",
- [(set Float16x2Regs:$dst,
- (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
-
-//-----------------------------------
-// Data Movement (Load / Store, Move)
-//-----------------------------------
-
-def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
- [SDNPWantRoot]>;
-def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
- [SDNPWantRoot]>;
-
-def MEMri : Operand<i32> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops Int32Regs, i32imm);
-}
-def MEMri64 : Operand<i64> {
- let PrintMethod = "printMemOperand";
- let MIOperandInfo = (ops Int64Regs, i64imm);
-}
-
-def imem : Operand<iPTR> {
- let PrintMethod = "printOperand";
-}
-
-def imemAny : Operand<iPTRAny> {
- let PrintMethod = "printOperand";
-}
-
-def LdStCode : Operand<i32> {
- let PrintMethod = "printLdStCode";
-}
-
-def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
-def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
-
-// Load a memory address into a u32 or u64 register.
-def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
- "mov.u32 \t$dst, $a;",
- [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
-def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
- "mov.u64 \t$dst, $a;",
- [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
-
-// Get pointer to local stack.
-let hasSideEffects = 0 in {
- def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
- "mov.u32 \t$d, __local_depot$num;", []>;
- def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
- "mov.u64 \t$d, __local_depot$num;", []>;
-}
-
-
-// copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
-let IsSimpleMove=1, hasSideEffects=0 in {
- def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
- "mov.pred \t$dst, $sss;", []>;
- def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
- "mov.u16 \t$dst, $sss;", []>;
- def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
- "mov.u32 \t$dst, $sss;", []>;
- def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
- "mov.u64 \t$dst, $sss;", []>;
-
- def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
- // We have to use .b16 here as there's no mov.f16.
- "mov.b16 \t$dst, $src;", []>;
- def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
- "mov.f32 \t$dst, $src;", []>;
- def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
- "mov.f64 \t$dst, $src;", []>;
-}
-
-def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
- "mov.pred \t$dst, $src;",
- [(set Int1Regs:$dst, imm:$src)]>;
-def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
- "mov.u16 \t$dst, $src;",
- [(set Int16Regs:$dst, imm:$src)]>;
-def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
- "mov.u32 \t$dst, $src;",
- [(set Int32Regs:$dst, imm:$src)]>;
-def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
- "mov.u64 \t$dst, $src;",
- [(set Int64Regs:$dst, imm:$src)]>;
-
-def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
- "mov.f32 \t$dst, $src;",
- [(set Float32Regs:$dst, fpimm:$src)]>;
-def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
- "mov.f64 \t$dst, $src;",
- [(set Float64Regs:$dst, fpimm:$src)]>;
-
-def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
-
-//---- Copy Frame Index ----
-def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
- "add.u32 \t$dst, ${addr:add};",
- [(set Int32Regs:$dst, ADDRri:$addr)]>;
-def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
- "add.u64 \t$dst, ${addr:add};",
- [(set Int64Regs:$dst, ADDRri64:$addr)]>;
-
-//-----------------------------------
-// Comparison and Selection
-//-----------------------------------
-
-multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
- Instruction setp_16rr,
- Instruction setp_16ri,
- Instruction setp_16ir,
- Instruction setp_32rr,
- Instruction setp_32ri,
- Instruction setp_32ir,
- Instruction setp_64rr,
- Instruction setp_64ri,
- Instruction setp_64ir,
- Instruction set_16rr,
- Instruction set_16ri,
- Instruction set_16ir,
- Instruction set_32rr,
- Instruction set_32ri,
- Instruction set_32ir,
- Instruction set_64rr,
- Instruction set_64ri,
- Instruction set_64ir> {
- // i16 -> pred
- def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
- (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
- (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
- (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
- // i32 -> pred
- def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
- (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
- (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
- (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
- // i64 -> pred
- def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
- (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
- (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
- def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
- (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
-
- // i16 -> i32
- def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
- (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
- (set_16ri Int16Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
- (set_16ir imm:$a, Int16Regs:$b, Mode)>;
- // i32 -> i32
- def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
- (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
- (set_32ri Int32Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
- (set_32ir imm:$a, Int32Regs:$b, Mode)>;
- // i64 -> i32
- def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
- (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
- (set_64ri Int64Regs:$a, imm:$b, Mode)>;
- def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
- (set_64ir imm:$a, Int64Regs:$b, Mode)>;
-}
-
-multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
- : ISET_FORMAT<OpNode, Mode,
- SETP_s16rr, SETP_s16ri, SETP_s16ir,
- SETP_s32rr, SETP_s32ri, SETP_s32ir,
- SETP_s64rr, SETP_s64ri, SETP_s64ir,
- SET_s16rr, SET_s16ri, SET_s16ir,
- SET_s32rr, SET_s32ri, SET_s32ir,
- SET_s64rr, SET_s64ri, SET_s64ir> {
- // TableGen doesn't like empty multiclasses.
- def : PatLeaf<(i32 0)>;
-}
-
-multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
- : ISET_FORMAT<OpNode, Mode,
- SETP_u16rr, SETP_u16ri, SETP_u16ir,
- SETP_u32rr, SETP_u32ri, SETP_u32ir,
- SETP_u64rr, SETP_u64ri, SETP_u64ir,
- SET_u16rr, SET_u16ri, SET_u16ir,
- SET_u32rr, SET_u32ri, SET_u32ir,
- SET_u64rr, SET_u64ri, SET_u64ir> {
- // TableGen doesn't like empty multiclasses.
- def : PatLeaf<(i32 0)>;
-}
-
-defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
-defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
-defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
-defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
-defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
-defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
-defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
-defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
-defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
-defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
-defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
-defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
-
-// i1 compares
-def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
-def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
-
-def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-
-// i1 compare -> i32
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
- (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
- (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-
-
-
-multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
- // f16 -> pred
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math,doF32FTZ]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
-
- // f32 -> pred
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
- (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
- (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
- (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
- (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
-
- // f64 -> pred
- def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
- (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
- def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
- (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
- (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
-
- // f16 -> i32
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
- Requires<[useFP16Math]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
- Requires<[useFP16Math, doF32FTZ]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
- Requires<[useFP16Math]>;
-
- // f32 -> i32
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
- (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
- (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
- (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
- Requires<[doF32FTZ]>;
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
- (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
-
- // f64 -> i32
- def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
- (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
- def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
- (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
- def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
- (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
-}
-
-defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
-defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
-defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
-defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
-defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
-defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
-
-defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
-defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
-defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
-defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
-defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
-defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
-
-defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
-defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
-defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
-defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
-defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
-defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
-
-defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
-defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
-
-// FIXME: What is this doing here? Can it be deleted?
-// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
-// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
-
-def SDTDeclareParamProfile :
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTDeclareScalarParamProfile :
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
-def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
-def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
-def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
-def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
-def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
-def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
-def SDTCallValProfile : SDTypeProfile<1, 0, []>;
-def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
-def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
-def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
-def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
-def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
-
-def DeclareParam :
- SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareScalarParam :
- SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRetParam :
- SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def DeclareRet :
- SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LoadParam :
- SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV2 :
- SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def LoadParamV4 :
- SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
-def PrintCall :
- SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintConvergentCall :
- SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintCallUni :
- SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def PrintConvergentCallUni :
- SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParam :
- SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV2 :
- SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamV4 :
- SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamU32 :
- SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def StoreParamS32 :
- SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgBegin :
- SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArg :
- SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def LastCallArg :
- SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallArgEnd :
- SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVoid :
- SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def Prototype :
- SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def CallVal :
- SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def MoveParam :
- SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
-def StoreRetval :
- SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV2 :
- SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def StoreRetvalV4 :
- SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
- [SDNPHasChain, SDNPSideEffect]>;
-def PseudoUseParam :
- SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def RETURNNode :
- SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
- [SDNPHasChain, SDNPSideEffect]>;
-
-let mayLoad = 1 in {
- class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
- []>;
-
- class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
- !strconcat("ld.param.v2", opstr,
- " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
-
- class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
- regclass:$dst4),
- (ins i32imm:$b),
- !strconcat("ld.param.v4", opstr,
- " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
- []>;
-}
-
-class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
- !strconcat("mov", opstr, " \t$dst, retval$b;"),
- [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
-
-let mayStore = 1 in {
- class StoreParamInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
- !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
- []>;
-
- class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
- i32imm:$a, i32imm:$b),
- !strconcat("st.param.v2", opstr,
- " \t[param$a+$b], {{$val, $val2}};"),
- []>;
-
- class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a,
- i32imm:$b),
- !strconcat("st.param.v4", opstr,
- " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
- []>;
-
- class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
- !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
- []>;
-
- class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
- !strconcat("st.param.v2", opstr,
- " \t[func_retval0+$a], {{$val, $val2}};"),
- []>;
-
- class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
- NVPTXInst<(outs),
- (ins regclass:$val, regclass:$val2, regclass:$val3,
- regclass:$val4, i32imm:$a),
- !strconcat("st.param.v4", opstr,
- " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
- []>;
-}
-
-let isCall=1 in {
- multiclass CALL<string OpcStr, SDNode OpNode> {
- def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
- def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
- def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
- def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
- def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
- [(OpNode (i32 4))]>;
- def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
- [(OpNode (i32 5))]>;
- def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5), "),
- [(OpNode (i32 6))]>;
- def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5, retval6), "),
- [(OpNode (i32 7))]>;
- def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
- "retval5, retval6, retval7), "),
- [(OpNode (i32 8))]>;
- }
-}
-
-defm Call : CALL<"call", PrintCall>;
-defm CallUni : CALL<"call.uni", PrintCallUni>;
-
-// Convergent call instructions. These are identical to regular calls, except
-// they have the isConvergent bit set.
-let isConvergent=1 in {
- defm ConvergentCall : CALL<"call", PrintConvergentCall>;
- defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
-}
-
-def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
-def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
-def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
-def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
-def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
-def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
-def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
-def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
-def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
-def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
-def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
-def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
-def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
-def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
-def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
-def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
-def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
-def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
-def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
-def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
-def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
-def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
-
-def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
-def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
-
-def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
-def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
-def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
-def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
-def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
-def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
-
-def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
-def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
-def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
-
-def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
-def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
-def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
-def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
-def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
-def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
-def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
-def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
-def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
-def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
-def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
-
-def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
-def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
-def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
-def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
-def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
-def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
-def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
-def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
-def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
-def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
-def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
-
-def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
-def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
-def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
-def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
-def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
-def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
-def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
-def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
-def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
-def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
-def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
-
-def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
-def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
-def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
-def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
-
-class CallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a, ",
- [(CallArg (i32 0), regclass:$a)]>;
-
-class LastCallArgInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$a), "$a",
- [(LastCallArg (i32 0), regclass:$a)]>;
-
-def CallArgI64 : CallArgInst<Int64Regs>;
-def CallArgI32 : CallArgInst<Int32Regs>;
-def CallArgI16 : CallArgInst<Int16Regs>;
-def CallArgF64 : CallArgInst<Float64Regs>;
-def CallArgF32 : CallArgInst<Float32Regs>;
-
-def LastCallArgI64 : LastCallArgInst<Int64Regs>;
-def LastCallArgI32 : LastCallArgInst<Int32Regs>;
-def LastCallArgI16 : LastCallArgInst<Int16Regs>;
-def LastCallArgF64 : LastCallArgInst<Float64Regs>;
-def LastCallArgF32 : LastCallArgInst<Float32Regs>;
-
-def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
- [(CallArg (i32 0), (i32 imm:$a))]>;
-def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
- [(LastCallArg (i32 0), (i32 imm:$a))]>;
-
-def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
- [(CallArg (i32 1), (i32 imm:$a))]>;
-def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
- [(LastCallArg (i32 1), (i32 imm:$a))]>;
-
-def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
- [(CallVoid (Wrapper tglobaladdr:$addr))]>;
-def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
- [(CallVoid Int32Regs:$addr)]>;
-def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
- [(CallVoid Int64Regs:$addr)]>;
-def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
- [(Prototype (i32 imm:$val))]>;
-
-def DeclareRetMemInst :
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
- ".param .align $align .b8 retval$num[$size];",
- [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetScalarInst :
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".param .b$size retval$num;",
- [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
-def DeclareRetRegInst :
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
- ".reg .b$size retval$num;",
- [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
-
-def DeclareParamInst :
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
- ".param .align $align .b8 param$a[$size];",
- [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
-def DeclareScalarParamInst :
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".param .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
-def DeclareScalarRegInst :
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
- ".reg .b$size param$a;",
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
-
-class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
- NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
- !strconcat("mov", asmstr, " \t$dst, $src;"),
- [(set regclass:$dst, (MoveParam regclass:$src))]>;
-
-def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
-def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
-def MoveParamI16 :
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
- "cvt.u16.u32 \t$dst, $src;",
- [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
-def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
-def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
-def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;
-
-class PseudoUseParamInst<NVPTXRegClass regclass> :
- NVPTXInst<(outs), (ins regclass:$src),
- "// Pseudo use of $src",
- [(PseudoUseParam regclass:$src)]>;
-
-def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
-def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
-def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
-def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
-def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
-
-
-//
-// Load / Store Handling
-//
-multiclass LD<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _areg : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _areg_64 : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr];", []>;
- def _ari : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
- def _ari_64 : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
- def _asi : NVPTXInst<
- (outs regclass:$dst),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t$dst, [$addr+$offset];", []>;
-}
-
-let mayLoad=1, hasSideEffects=0 in {
- defm LD_i8 : LD<Int16Regs>;
- defm LD_i16 : LD<Int16Regs>;
- defm LD_i32 : LD<Int32Regs>;
- defm LD_i64 : LD<Int64Regs>;
- defm LD_f16 : LD<Float16Regs>;
- defm LD_f16x2 : LD<Float16x2Regs>;
- defm LD_f32 : LD<Float32Regs>;
- defm LD_f64 : LD<Float64Regs>;
-}
-
-multiclass ST<NVPTXRegClass regclass> {
- def _avar : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _areg : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr], $src;", []>;
- def _ari : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
- def _ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
- def _asi : NVPTXInst<
- (outs),
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
- " \t[$addr+$offset], $src;", []>;
-}
-
-let mayStore=1, hasSideEffects=0 in {
- defm ST_i8 : ST<Int16Regs>;
- defm ST_i16 : ST<Int16Regs>;
- defm ST_i32 : ST<Int32Regs>;
- defm ST_i64 : ST<Int64Regs>;
- defm ST_f16 : ST<Float16Regs>;
- defm ST_f16x2 : ST<Float16x2Regs>;
- defm ST_f32 : ST<Float32Regs>;
- defm ST_f64 : ST<Float64Regs>;
-}
-
-// The following is used only in and after vector elementizations. Vector
-// elementization happens at the machine instruction level, so the following
-// instructions never appear in the DAG.
-multiclass LD_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_areg : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_areg_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr];", []>;
- def _v2_ari : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v2_ari_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v2_asi : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
- def _v4_avar : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_areg : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_areg_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
- def _v4_ari : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
- def _v4_ari_64 : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
- def _v4_asi : NVPTXInst<
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
-}
-let mayLoad=1, hasSideEffects=0 in {
- defm LDV_i8 : LD_VEC<Int16Regs>;
- defm LDV_i16 : LD_VEC<Int16Regs>;
- defm LDV_i32 : LD_VEC<Int32Regs>;
- defm LDV_i64 : LD_VEC<Int64Regs>;
- defm LDV_f16 : LD_VEC<Float16Regs>;
- defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
- defm LDV_f32 : LD_VEC<Float32Regs>;
- defm LDV_f64 : LD_VEC<Float64Regs>;
-}
-
-multiclass ST_VEC<NVPTXRegClass regclass> {
- def _v2_avar : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_areg : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2}};", []>;
- def _v2_ari : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v2_ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v2_asi : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
- i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2}};", []>;
- def _v4_avar : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_areg : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_areg_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_ari : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_ari_64 : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
- def _v4_asi : NVPTXInst<
- (outs),
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
- "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
-}
-
-let mayStore=1, hasSideEffects=0 in {
- defm STV_i8 : ST_VEC<Int16Regs>;
- defm STV_i16 : ST_VEC<Int16Regs>;
- defm STV_i32 : ST_VEC<Int32Regs>;
- defm STV_i64 : ST_VEC<Int64Regs>;
- defm STV_f16 : ST_VEC<Float16Regs>;
- defm STV_f16x2 : ST_VEC<Float16x2Regs>;
- defm STV_f32 : ST_VEC<Float32Regs>;
- defm STV_f64 : ST_VEC<Float64Regs>;
-}
-
-//---- Conversion ----
-
-class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
- NVPTXRegClass regclassOut> :
- NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
- !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
- [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
-
-def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
-def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
-def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
-def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
-def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
-def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
-def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
-def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
-
-// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
-// we cannot specify floating-point literals in isel patterns. Therefore, we
-// use an integer selp to select either 1 or 0 and then cvt to floating-point.
-
-// sint -> f16
-def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
- (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
- (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
- (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
- (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f16
-def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
- (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
- (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
- (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
- (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
-
-// sint -> f32
-def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
- (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
- (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
- (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
- (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f32
-def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
- (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
- (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
- (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
- (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
-
-// sint -> f64
-def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
- (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
- (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
- (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
- (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
-
-// uint -> f64
-def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
- (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
- (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
- (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
- (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
-
-
-// f16 -> sint
-def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
- (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
- (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
- (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
- (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
- (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
- (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;
-
-// f16 -> uint
-def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
- (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
- (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
- (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
- (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
- (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
- (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;
-
-// f32 -> sint
-def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
- (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
- (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
- (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
- (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
- (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
- (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
-
-// f32 -> uint
-def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
- (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
- (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
- (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
- (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
- (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
- (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
-
-// f64 -> sint
-def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
- (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
- (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
- (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
-
-// f64 -> uint
-def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
- (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
- (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
- (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
-
-// sext i1
-def : Pat<(i16 (sext Int1Regs:$a)),
- (SELP_s16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (sext Int1Regs:$a)),
- (SELP_s32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (sext Int1Regs:$a)),
- (SELP_s64ii -1, 0, Int1Regs:$a)>;
-
-// zext i1
-def : Pat<(i16 (zext Int1Regs:$a)),
- (SELP_u16ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (zext Int1Regs:$a)),
- (SELP_u32ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (zext Int1Regs:$a)),
- (SELP_u64ii 1, 0, Int1Regs:$a)>;
-
-// anyext i1
-def : Pat<(i16 (anyext Int1Regs:$a)),
- (SELP_u16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (anyext Int1Regs:$a)),
- (SELP_u32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (anyext Int1Regs:$a)),
- (SELP_u64ii -1, 0, Int1Regs:$a)>;
-
-// sext i16
-def : Pat<(i32 (sext Int16Regs:$a)),
- (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (sext Int16Regs:$a)),
- (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
-
-// zext i16
-def : Pat<(i32 (zext Int16Regs:$a)),
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (zext Int16Regs:$a)),
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
-
-// anyext i16
-def : Pat<(i32 (anyext Int16Regs:$a)),
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (anyext Int16Regs:$a)),
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
-
-// sext i32
-def : Pat<(i64 (sext Int32Regs:$a)),
- (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
-
-// zext i32
-def : Pat<(i64 (zext Int32Regs:$a)),
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
-
-// anyext i32
-def : Pat<(i64 (anyext Int32Regs:$a)),
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
-
-
-// truncate i64
-def : Pat<(i32 (trunc Int64Regs:$a)),
- (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i16 (trunc Int64Regs:$a)),
- (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int64Regs:$a)),
- (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
-
-// truncate i32
-def : Pat<(i16 (trunc Int32Regs:$a)),
- (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int32Regs:$a)),
- (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
-
-// truncate i16
-def : Pat<(i1 (trunc Int16Regs:$a)),
- (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
-
-// sext_inreg
-def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
-
-
-// Select instructions with 32-bit predicates
-def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
- (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
- (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
- (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
- (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
- (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
- (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-
-
-let hasSideEffects = 0 in {
- // pack a set of smaller int registers to a larger int register
- def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2,
- Int16Regs:$s3, Int16Regs:$s4),
- "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
- def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
- (ins Int16Regs:$s1, Int16Regs:$s2),
- "mov.b32 \t$d, {{$s1, $s2}};", []>;
- def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
- (ins Int32Regs:$s1, Int32Regs:$s2),
- "mov.b64 \t$d, {{$s1, $s2}};", []>;
- def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
- (ins Float32Regs:$s1, Float32Regs:$s2),
- "mov.b64 \t$d, {{$s1, $s2}};", []>;
-
- // unpack a larger int register to a set of smaller int registers
- def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
- Int16Regs:$d3, Int16Regs:$d4),
- (ins Int64Regs:$s),
- "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
- def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
- (ins Int32Regs:$s),
- "mov.b32 \t{{$d1, $d2}}, $s;", []>;
- def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
- (ins Int64Regs:$s),
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;
- def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
- (ins Float64Regs:$s),
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;
-
-}
-
-let hasSideEffects = 0 in {
- // Extract element of f16x2 register. PTX does not provide any way
- // to access elements of f16x2 vector directly, so we need to
- // extract it using a temporary register.
- def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16x2Regs:$src),
- "{{ .reg .b16 \t%tmp_hi;\n\t"
- " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
- [(set Float16Regs:$dst,
- (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
- def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
- (ins Float16x2Regs:$src),
- "{{ .reg .b16 \t%tmp_lo;\n\t"
- " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
- [(set Float16Regs:$dst,
- (extractelt (v2f16 Float16x2Regs:$src), 1))]>;
-
- // Coalesce two f16 registers into f16x2
- def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
- (ins Float16Regs:$a, Float16Regs:$b),
- "mov.b32 \t$dst, {{$a, $b}};",
- [(set Float16x2Regs:$dst,
- (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;
-
- // Directly initializing underlying the b32 register is one less SASS
- // instruction than than vector-packing move.
- def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
- "mov.b32 \t$dst, $src;",
- []>;
-
- // Split f16x2 into two f16 registers.
- def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
- (ins Float16x2Regs:$src),
- "mov.b32 \t{{$lo, $hi}}, $src;",
- []>;
- // Split an i32 into two f16
- def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
- (ins Int32Regs:$src),
- "mov.b32 \t{{$lo, $hi}}, $src;",
- []>;
-}
-
-// Count leading zeros
-let hasSideEffects = 0 in {
- def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "clz.b32 \t$d, $a;", []>;
- def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "clz.b64 \t$d, $a;", []>;
-}
-
-// 32-bit has a direct PTX instruction
-def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
-
-// The return type of the ctlz ISD node is the same as its input, but the PTX
-// ctz instruction always returns a 32-bit value. For ctlz.i64, convert the
-// ptx value to 64 bits to match the ISD node's semantics, unless we know we're
-// truncating back down to 32 bits.
-def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;
-
-// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
-// result back to 16-bits if necessary. We also need to subtract 16 because
-// the high-order 16 zeros were counted.
-//
-// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
-// use to save one SASS instruction (on sm_35 anyway):
-//
-// mov.b32 $tmp, {0xffff, $a}
-// ctlz.b32 $result, $tmp
-//
-// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
-// and then ctlz that value. This way we don't have to subtract 16 from the
-// result. Unfortunately today we don't have a way to generate
-// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
-def : Pat<(ctlz Int16Regs:$a),
- (SUBi16ri (CVT_u16_u32
- (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
-def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
- (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
-
-// Population count
-let hasSideEffects = 0 in {
- def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
- "popc.b32 \t$d, $a;", []>;
- def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
- "popc.b64 \t$d, $a;", []>;
-}
-
-// 32-bit has a direct PTX instruction
-def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
-
-// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
-// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
-// pattern that avoids the type conversion if we're truncating the result to
-// i32 anyway.
-def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;
-
-// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
-// If we know that we're storing into an i32, we can avoid the final trunc.
-def : Pat<(ctpop Int16Regs:$a),
- (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
-def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
- (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
-
-// fpround f32 -> f16
-def : Pat<(f16 (fpround Float32Regs:$a)),
- (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fpround Float32Regs:$a)),
- (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
-
-// fpround f64 -> f16
-def : Pat<(f16 (fpround Float64Regs:$a)),
- (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fpround Float64Regs:$a)),
- (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
-
-// fpround f64 -> f32
-def : Pat<(f32 (fpround Float64Regs:$a)),
- (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpround Float64Regs:$a)),
- (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
-
-// fpextend f16 -> f32
-def : Pat<(f32 (fpextend Float16Regs:$a)),
- (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpextend Float16Regs:$a)),
- (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;
-
-// fpextend f16 -> f64
-def : Pat<(f64 (fpextend Float16Regs:$a)),
- (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f64 (fpextend Float16Regs:$a)),
- (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;
-
-// fpextend f32 -> f64
-def : Pat<(f64 (fpextend Float32Regs:$a)),
- (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f64 (fpextend Float32Regs:$a)),
- (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
-
-def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInGlue]>;
-
-// fceil, ffloor, fround, ftrunc.
-
-def : Pat<(fceil Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fceil Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fceil Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fceil Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fceil Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
-
-def : Pat<(ffloor Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ffloor Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ffloor Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ffloor Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ffloor Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
-
-def : Pat<(fround Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f16 (fround Float16Regs:$a)),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fround Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fround Float32Regs:$a)),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(f64 (fround Float64Regs:$a)),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-def : Pat<(ftrunc Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ftrunc Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ftrunc Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(ftrunc Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(ftrunc Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
-
-// nearbyint and rint are implemented as rounding to nearest even. This isn't
-// strictly correct, because it causes us to ignore the rounding mode. But it
-// matches what CUDA's "libm" does.
-
-def : Pat<(fnearbyint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fnearbyint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fnearbyint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(fnearbyint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(fnearbyint Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-def : Pat<(frint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(frint Float16Regs:$a),
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(frint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(frint Float32Regs:$a),
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
-def : Pat<(frint Float64Regs:$a),
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
-
-
-//-----------------------------------
-// Control-flow
-//-----------------------------------
-
-let isTerminator=1 in {
- let isReturn=1, isBarrier=1 in
- def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
-
- let isBranch=1 in
- def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@$a bra \t$target;",
- [(brcond Int1Regs:$a, bb:$target)]>;
- let isBranch=1 in
- def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
- "@!$a bra \t$target;", []>;
-
- let isBranch=1, isBarrier=1 in
- def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
- "bra.uni \t$target;", [(br bb:$target)]>;
-}
-
-def : Pat<(brcond Int32Regs:$a, bb:$target),
- (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
-
-// SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
-// conditional branch if the target block is the next block so that the code
-// can fall through to the target block. The invertion is done by 'xor
-// condition, 1', which will be translated to (setne condition, -1). Since ptx
-// supports '@!pred bra target', we should use it.
-def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
- (CBranchOther Int1Regs:$a, bb:$target)>;
-
-// Call
-def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
-def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
- [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPSideEffect]>;
-
-def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
-def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
-def calltarget : Operand<i32>;
-let isCall=1 in {
- def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
-}
-
-def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
-def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
-
-// Pseudo instructions.
-class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
- : NVPTXInst<outs, ins, asmstr, pattern>;
-
-def Callseq_Start :
- NVPTXInst<(outs), (ins i32imm:$amt),
- "\\{ // callseq $amt\n"
- "\t.reg .b32 temp_param_reg;",
- [(callseq_start timm:$amt)]>;
-def Callseq_End :
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\\} // callseq $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
-
-// trap instruction
-def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
-
-// Call prototype wrapper
-def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def CallPrototype :
- SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def ProtoIdent : Operand<i32> {
- let PrintMethod = "printProtoIdent";
-}
-def CALL_PROTOTYPE :
- NVPTXInst<(outs), (ins ProtoIdent:$ident),
- "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
-
-
-include "NVPTXIntrinsics.td"
-
-
-//-----------------------------------
-// Notes
-//-----------------------------------
-// BSWAP is currently expanded. The following is a more efficient
-// - for < sm_20, use vector scalar mov, as tesla support native 16-bit register
-// - for sm_20, use pmpt (use vector scalar mov to get the pack and
-// unpack). sm_20 supports native 32-bit register, but not native 16-bit
-// register.
+//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the PTX instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "NVPTXInstrFormats.td"
+
+// A NOP instruction
+let hasSideEffects = 0 in {
+ def NOP : NVPTXInst<(outs), (ins), "", []>;
+}
+
+let OperandType = "OPERAND_IMMEDIATE" in {
+ def f16imm : Operand<f16>;
+}
+
+// List of vector specific properties
+def isVecLD : VecInstTypeEnum<1>;
+def isVecST : VecInstTypeEnum<2>;
+def isVecBuild : VecInstTypeEnum<3>;
+def isVecShuffle : VecInstTypeEnum<4>;
+def isVecExtract : VecInstTypeEnum<5>;
+def isVecInsert : VecInstTypeEnum<6>;
+def isVecDest : VecInstTypeEnum<7>;
+def isVecOther : VecInstTypeEnum<15>;
+
+//===----------------------------------------------------------------------===//
+// NVPTX Operand Definitions.
+//===----------------------------------------------------------------------===//
+
+// Branch-target operand (a basic-block label).
+def brtarget : Operand<OtherVT>;
+
+// CVT conversion modes
+// These must match the enum in NVPTX.h
+// Base rounding modes occupy the low nibble (0x0-0x8).
+def CvtNONE : PatLeaf<(i32 0x0)>;
+def CvtRNI : PatLeaf<(i32 0x1)>;
+def CvtRZI : PatLeaf<(i32 0x2)>;
+def CvtRMI : PatLeaf<(i32 0x3)>;
+def CvtRPI : PatLeaf<(i32 0x4)>;
+def CvtRN : PatLeaf<(i32 0x5)>;
+def CvtRZ : PatLeaf<(i32 0x6)>;
+def CvtRM : PatLeaf<(i32 0x7)>;
+def CvtRP : PatLeaf<(i32 0x8)>;
+
+// FTZ (flush-subnormals-to-zero) variants: base mode with bit 0x10 set.
+def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
+def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
+def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
+def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
+def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
+def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
+def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
+def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
+def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
+
+// Saturation flag is bit 0x20; 0x30 is SAT combined with FTZ.
+def CvtSAT : PatLeaf<(i32 0x20)>;
+def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
+
+// Immediate operand carrying one of the Cvt* mode values above; printed as
+// the cvt instruction's ".rn"/".ftz"/".sat" suffixes by printCvtMode.
+def CvtMode : Operand<i32> {
+  let PrintMethod = "printCvtMode";
+}
+
+// Compare modes
+// These must match the enum in NVPTX.h
+def CmpEQ : PatLeaf<(i32 0)>;
+def CmpNE : PatLeaf<(i32 1)>;
+def CmpLT : PatLeaf<(i32 2)>;
+def CmpLE : PatLeaf<(i32 3)>;
+def CmpGT : PatLeaf<(i32 4)>;
+def CmpGE : PatLeaf<(i32 5)>;
+def CmpEQU : PatLeaf<(i32 10)>;
+def CmpNEU : PatLeaf<(i32 11)>;
+def CmpLTU : PatLeaf<(i32 12)>;
+def CmpLEU : PatLeaf<(i32 13)>;
+def CmpGTU : PatLeaf<(i32 14)>;
+def CmpGEU : PatLeaf<(i32 15)>;
+def CmpNUM : PatLeaf<(i32 16)>;
+def CmpNAN : PatLeaf<(i32 17)>;
+
+// FTZ variants of the compare modes: base mode with bit 0x100 set.
+def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
+def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
+def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
+def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
+def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
+def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
+def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
+def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
+def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
+def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
+def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
+def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
+def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
+def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
+
+// Immediate operand carrying one of the Cmp* mode values above.
+def CmpMode : Operand<i32> {
+  let PrintMethod = "printCmpMode";
+}
+// Immediate selecting one element of a packed register (printed by
+// printVecElement — presumably as an .x/.y/... suffix; confirm in the
+// asm printer).
+def VecElement : Operand<i32> {
+  let PrintMethod = "printVecElement";
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instruction Predicate Definitions
+//===----------------------------------------------------------------------===//
+
+// Predicates whose string queries "Subtarget->..." test NVPTXSubtarget
+// feature methods. The rest (useF32FTZ(), allowFMA(), getDivF32Level(), ...)
+// call bare methods — NOTE(review): these presumably resolve against the
+// instruction selector; confirm in NVPTXISelDAGToDAG.
+
+def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
+def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
+def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
+// Fall back to global-space atomics when generic-space atomics are missing.
+def useAtomRedG32forGen32 :
+  Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
+def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
+def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
+def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
+def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
+def useAtomRedG64forGen64 :
+  Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
+def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
+def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
+def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
+def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
+def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
+def hasVote : Predicate<"Subtarget->hasVote()">;
+def hasDouble : Predicate<"Subtarget->hasDouble()">;
+def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
+def hasLDG : Predicate<"Subtarget->hasLDG()">;
+def hasLDU : Predicate<"Subtarget->hasLDU()">;
+def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
+
+// Whether f32 operations should flush subnormals to zero.
+def doF32FTZ : Predicate<"useF32FTZ()">;
+def doNoF32FTZ : Predicate<"!useF32FTZ()">;
+
+// References a boolean of the same name — presumably a command-line flag
+// enabling the mul.wide patterns below; verify where it is declared.
+def doMulWide : Predicate<"doMulWide">;
+
+def allowFMA : Predicate<"allowFMA()">;
+def noFMA : Predicate<"!allowFMA()">;
+def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
+
+// Division accuracy level: 0 = approximate, 1 = "full" (semi-accurate).
+def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
+def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
+
+def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
+def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
+
+def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
+def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
+
+// Vacuously-true predicate for defs that need an explicit guard.
+def true : Predicate<"true">;
+
+def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
+
+def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
+
+//===----------------------------------------------------------------------===//
+// Some Common Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+// Template for instructions which take three int64, int32, or int16 args.
+// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
+// For each width this defines a register-register (rr) and a
+// register-immediate (ri) form.
+multiclass I3<string OpcStr, SDNode OpNode> {
+  def i64rr :
+    NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+              !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+              [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+  def i64ri :
+    NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+              !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+              [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+  def i32rr :
+    NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+              !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+  def i32ri :
+    NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+              !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+  def i16rr :
+    NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+              !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+              [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+  def i16ri :
+    NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+              !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+              // (imm):$b is equivalent to imm:$b.
+              [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+}
+
+// Template for instructions which take 3 int32 args. The instructions are
+// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
+// Used below for the carry-propagating 32-bit add/sub forms.
+multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
+   def i32rr :
+    NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+              !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+   def i32ri :
+    NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+              !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+}
+
+// Template for instructions which take three fp64 or fp32 args.  The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32 functions.
+//
+// This multiclass should be used for nodes that cannot be folded into FMAs.
+// For nodes that can be folded into FMAs (i.e. adds and muls), use
+// F3_fma_component.
+// The _ftz defs are listed before the plain f32 defs so they win pattern
+// selection when doF32FTZ holds.
+multiclass F3<string OpcStr, SDNode OpNode> {
+  def f64rr :
+    NVPTXInst<(outs Float64Regs:$dst),
+              (ins Float64Regs:$a, Float64Regs:$b),
+              !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+              [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+  def f64ri :
+    NVPTXInst<(outs Float64Regs:$dst),
+              (ins Float64Regs:$a, f64imm:$b),
+              !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+              [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
+  def f32rr_ftz :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, Float32Regs:$b),
+              !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+              Requires<[doF32FTZ]>;
+  def f32ri_ftz :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, f32imm:$b),
+              !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+              Requires<[doF32FTZ]>;
+  def f32rr :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, Float32Regs:$b),
+              !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+  def f32ri :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, f32imm:$b),
+              !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+}
+
+// Template for instructions which take three FP args.  The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32/fp16 functions.
+//
+// This multiclass should be used for nodes that can be folded to make fma ops.
+// In this case, we use the ".rn" variant when FMA is disabled, as this behaves
+// just like the non ".rn" op, but prevents ptxas from creating FMAs.
+// Note: the f16/f16x2 ftz variants are gated on doF32FTZ — the f32 FTZ
+// setting also controls the fp16 ftz modifier here.
+multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
+  def f64rr :
+    NVPTXInst<(outs Float64Regs:$dst),
+              (ins Float64Regs:$a, Float64Regs:$b),
+              !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+              [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+              Requires<[allowFMA]>;
+  def f64ri :
+    NVPTXInst<(outs Float64Regs:$dst),
+              (ins Float64Regs:$a, f64imm:$b),
+              !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+              [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+              Requires<[allowFMA]>;
+  def f32rr_ftz :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, Float32Regs:$b),
+              !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+              Requires<[allowFMA, doF32FTZ]>;
+  def f32ri_ftz :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, f32imm:$b),
+              !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+              Requires<[allowFMA, doF32FTZ]>;
+  def f32rr :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, Float32Regs:$b),
+              !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+              Requires<[allowFMA]>;
+  def f32ri :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, f32imm:$b),
+              !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+              Requires<[allowFMA]>;
+
+  def f16rr_ftz :
+    NVPTXInst<(outs Float16Regs:$dst),
+              (ins Float16Regs:$a, Float16Regs:$b),
+              !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
+              [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+              Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+  def f16rr :
+    NVPTXInst<(outs Float16Regs:$dst),
+              (ins Float16Regs:$a, Float16Regs:$b),
+              !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
+              [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+              Requires<[useFP16Math, allowFMA]>;
+
+  def f16x2rr_ftz :
+    NVPTXInst<(outs Float16x2Regs:$dst),
+              (ins Float16x2Regs:$a, Float16x2Regs:$b),
+              !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
+              [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+              Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+  def f16x2rr :
+    NVPTXInst<(outs Float16x2Regs:$dst),
+              (ins Float16x2Regs:$a, Float16x2Regs:$b),
+              !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
+              [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+              Requires<[useFP16Math, allowFMA]>;
+
+  // These have strange names so we don't perturb existing mir tests.
+  // They are the noFMA (".rn") counterparts of all of the defs above.
+  def _rnf64rr :
+    NVPTXInst<(outs Float64Regs:$dst),
+              (ins Float64Regs:$a, Float64Regs:$b),
+              !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+              [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+              Requires<[noFMA]>;
+  def _rnf64ri :
+    NVPTXInst<(outs Float64Regs:$dst),
+              (ins Float64Regs:$a, f64imm:$b),
+              !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+              [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+              Requires<[noFMA]>;
+  def _rnf32rr_ftz :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, Float32Regs:$b),
+              !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+              Requires<[noFMA, doF32FTZ]>;
+  def _rnf32ri_ftz :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, f32imm:$b),
+              !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+              Requires<[noFMA, doF32FTZ]>;
+  def _rnf32rr :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, Float32Regs:$b),
+              !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+              Requires<[noFMA]>;
+  def _rnf32ri :
+    NVPTXInst<(outs Float32Regs:$dst),
+              (ins Float32Regs:$a, f32imm:$b),
+              !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+              [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+              Requires<[noFMA]>;
+  def _rnf16rr_ftz :
+    NVPTXInst<(outs Float16Regs:$dst),
+              (ins Float16Regs:$a, Float16Regs:$b),
+              !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
+              [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+              Requires<[useFP16Math, noFMA, doF32FTZ]>;
+  def _rnf16rr :
+    NVPTXInst<(outs Float16Regs:$dst),
+              (ins Float16Regs:$a, Float16Regs:$b),
+              !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
+              [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+              Requires<[useFP16Math, noFMA]>;
+  def _rnf16x2rr_ftz :
+    NVPTXInst<(outs Float16x2Regs:$dst),
+              (ins Float16x2Regs:$a, Float16x2Regs:$b),
+              !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
+              [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+              Requires<[useFP16Math, noFMA, doF32FTZ]>;
+  def _rnf16x2rr :
+    NVPTXInst<(outs Float16x2Regs:$dst),
+              (ins Float16x2Regs:$a, Float16x2Regs:$b),
+              !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
+              [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+              Requires<[useFP16Math, noFMA]>;
+}
+
+// Template for operations which take two f32 or f64 operands.  Provides three
+// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
+// subnormal inputs and results to zero).
+// Used for unary FP ops (abs, neg, sqrt).
+multiclass F2<string OpcStr, SDNode OpNode> {
+  def f64 :     NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
+                          !strconcat(OpcStr, ".f64 \t$dst, $a;"),
+                          [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
+  def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+                          !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
+                          [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
+                          Requires<[doF32FTZ]>;
+  def f32 :     NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+                          !strconcat(OpcStr, ".f32 \t$dst, $a;"),
+                          [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instructions.
+//===----------------------------------------------------------------------===//
+
+//-----------------------------------
+// Type Conversion
+//-----------------------------------
+
+let hasSideEffects = 0 in {
+  // Generate a cvt to the given type from all possible types.  Each instance
+  // takes a CvtMode immediate that defines the conversion mode to use.  It can
+  // be CvtNONE to omit a conversion mode.
+  // Note: 8-bit source types live in 16-bit registers (Int16Regs), since
+  // NVPTX has no dedicated 8-bit register class.  All patterns are empty;
+  // these instructions are selected manually.
+  multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
+    def _s8 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int16Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".s8 \t$dst, $src;"), []>;
+    def _u8 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int16Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".u8 \t$dst, $src;"), []>;
+    def _s16 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int16Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".s16 \t$dst, $src;"), []>;
+    def _u16 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int16Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".u16 \t$dst, $src;"), []>;
+    def _s32 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int32Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".s32 \t$dst, $src;"), []>;
+    def _u32 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int32Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".u32 \t$dst, $src;"), []>;
+    def _s64 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int64Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".s64 \t$dst, $src;"), []>;
+    def _u64 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Int64Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".u64 \t$dst, $src;"), []>;
+    def _f16 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Float16Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".f16 \t$dst, $src;"), []>;
+    def _f32 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Float32Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".f32 \t$dst, $src;"), []>;
+    def _f64 :
+      NVPTXInst<(outs RC:$dst),
+                (ins Float64Regs:$src, CvtMode:$mode),
+                !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+                FromName, ".f64 \t$dst, $src;"), []>;
+  }
+
+  // Generate cvts from all types to all types.
+  defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
+  defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
+  defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
+  defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
+  defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
+  defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
+  defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
+  defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
+  defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
+  defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
+  defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
+
+  // These cvts are different from those above: The source and dest registers
+  // are of the same type.  They perform in-register sign extension from a
+  // narrower logical width.
+  def CVT_INREG_s16_s8 :  NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+                                    "cvt.s16.s8 \t$dst, $src;", []>;
+  def CVT_INREG_s32_s8 :  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+                                    "cvt.s32.s8 \t$dst, $src;", []>;
+  def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+                                    "cvt.s32.s16 \t$dst, $src;", []>;
+  def CVT_INREG_s64_s8 :  NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+                                    "cvt.s64.s8 \t$dst, $src;", []>;
+  def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+                                    "cvt.s64.s16 \t$dst, $src;", []>;
+  def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+                                    "cvt.s64.s32 \t$dst, $src;", []>;
+}
+
+//-----------------------------------
+// Integer Arithmetic
+//-----------------------------------
+
+// Template for xor masquerading as int1 arithmetic.
+// (add and sub are both xor in GF(2).)
+multiclass ADD_SUB_i1<SDNode OpNode> {
+  def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+                     "xor.pred \t$dst, $a, $b;",
+                     [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+  def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+                     "xor.pred \t$dst, $a, $b;",
+                     [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
+}
+
+// int1 addition and subtraction are both just xor.
+defm ADD_i1 : ADD_SUB_i1<add>;
+defm SUB_i1 : ADD_SUB_i1<sub>;
+
+// int16, int32, and int64 signed addition.  Since nvptx is 2's complement, we
+// also use these for unsigned arithmetic.
+defm ADD : I3<"add.s", add>;
+defm SUB : I3<"sub.s", sub>;
+
+// int32 addition and subtraction with carry-out.
+// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
+defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
+defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
+
+// int32 addition and subtraction with carry-in and carry-out.
+defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
+defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
+
+// mul.lo: low half of the product, same width as the operands.
+defm MULT : I3<"mul.lo.s", mul>;
+
+// mul.hi: high half of the full-width product, signed/unsigned.
+defm MULTHS : I3<"mul.hi.s", mulhs>;
+defm MULTHU : I3<"mul.hi.u", mulhu>;
+
+defm SDIV : I3<"div.s", sdiv>;
+defm UDIV : I3<"div.u", udiv>;
+
+// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM
+// will lower it.
+defm SREM : I3<"rem.s", srem>;
+defm UREM : I3<"rem.u", urem>;
+
+// Integer absolute value, emitted directly as the PTX abs instruction.
+// SizeName is the ".sNN" type suffix appended to "abs"; RC is the matching
+// register class.
+multiclass ABS<RegisterClass RC, string SizeName> {
+  def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
+                  !strconcat("abs", SizeName, " \t$dst, $a;"),
+                  [(set RC:$dst, (abs RC:$a))]>;
+}
+defm ABS_16 : ABS<Int16Regs, ".s16">;
+defm ABS_32 : ABS<Int32Regs, ".s32">;
+defm ABS_64 : ABS<Int64Regs, ".s64">;
+
+// Integer min/max.
+defm SMAX : I3<"max.s", smax>;
+defm UMAX : I3<"max.u", umax>;
+defm SMIN : I3<"min.s", smin>;
+defm UMIN : I3<"min.u", umin>;
+
+//
+// Wide multiplication
+//
+// mul.wide multiplies two N-bit values into a 2N-bit result.  The Imm and
+// Imm64/Imm32 variants differ only in the TableGen type of the immediate
+// operand; the emitted assembly is identical.
+def MULWIDES64 :
+  NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+            "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm :
+  NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+            "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm64 :
+  NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+            "mul.wide.s32 \t$dst, $a, $b;", []>;
+
+def MULWIDEU64 :
+  NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+            "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm :
+  NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+            "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm64 :
+  NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+            "mul.wide.u32 \t$dst, $a, $b;", []>;
+
+def MULWIDES32 :
+  NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+            "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm :
+  NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+            "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm32 :
+  NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+            "mul.wide.s16 \t$dst, $a, $b;", []>;
+
+def MULWIDEU32 :
+  NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+            "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm :
+  NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+            "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm32 :
+  NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+            "mul.wide.u16 \t$dst, $a, $b;", []>;
+
+// One result, two operands; only the two operands are constrained to share a
+// type — the result type is deliberately left free so it can be wider.
+def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
+def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
+def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
+
+// Matchers for signed, unsigned mul.wide ISD nodes.
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
+          (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
+          (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
+          (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
+          (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
+      Requires<[doMulWide]>;
+
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
+          (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
+          (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
+          (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
+          (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
+      Requires<[doMulWide]>;
+
+// Predicates used for converting some patterns to mul.wide.
+// Immediate fits in a signed 32-bit value.
+def SInt32Const : PatLeaf<(imm), [{
+  const APInt &v = N->getAPIntValue();
+  return v.isSignedIntN(32);
+}]>;
+
+// Immediate fits in an unsigned 32-bit value.
+def UInt32Const : PatLeaf<(imm), [{
+  const APInt &v = N->getAPIntValue();
+  return v.isIntN(32);
+}]>;
+
+def SInt16Const : PatLeaf<(imm), [{
+  const APInt &v = N->getAPIntValue();
+  return v.isSignedIntN(16);
+}]>;
+
+def UInt16Const : PatLeaf<(imm), [{
+  const APInt &v = N->getAPIntValue();
+  return v.isIntN(16);
+}]>;
+
+def Int5Const : PatLeaf<(imm), [{
+  // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.
+  const APInt &v = N->getAPIntValue();
+  return v.sge(0) && v.slt(32);
+}]>;
+
+def Int4Const : PatLeaf<(imm), [{
+  // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.
+  const APInt &v = N->getAPIntValue();
+  return v.sge(0) && v.slt(16);
+}]>;
+
+// Rewrites a shift amount v into the multiplier 2^v (as an i32 immediate).
+def SHL2MUL32 : SDNodeXForm<imm, [{
+  const APInt &v = N->getAPIntValue();
+  APInt temp(32, 1);
+  return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
+}]>;
+
+// Same as SHL2MUL32, but produces an i16 multiplier.
+def SHL2MUL16 : SDNodeXForm<imm, [{
+  const APInt &v = N->getAPIntValue();
+  APInt temp(16, 1);
+  return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
+}]>;
+
+// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
+// (x << v) becomes (x * 2^v) via the SHL2MUL transforms above.
+def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
+          (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+      Requires<[doMulWide]>;
+def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
+          (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+      Requires<[doMulWide]>;
+
+def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
+          (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+      Requires<[doMulWide]>;
+def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
+          (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+      Requires<[doMulWide]>;
+
+// Convert "sign/zero-extend then multiply" to mul.wide.
+// The immediate forms require the constant to fit the narrower source width
+// (SInt32Const/UInt32Const etc.), otherwise the wide product would be wrong.
+def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
+          (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
+          (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
+      Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
+          (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
+          (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
+      Requires<[doMulWide]>;
+
+def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
+          (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
+          (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
+      Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
+          (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+      Requires<[doMulWide]>;
+def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
+          (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
+      Requires<[doMulWide]>;
+
+//
+// Integer multiply-add
+//
+// One result, three same-typed integer operands: d = mad(a, b, c).
+def SDTIMAD :
+  SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
+                       SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
+def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
+
+// mad.lo variants for each width; r/i suffixes give every combination of
+// register and immediate for the second and third operands.
+def MAD16rrr :
+  NVPTXInst<(outs Int16Regs:$dst),
+            (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
+            "mad.lo.s16 \t$dst, $a, $b, $c;",
+            [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
+def MAD16rri :
+  NVPTXInst<(outs Int16Regs:$dst),
+            (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
+            "mad.lo.s16 \t$dst, $a, $b, $c;",
+            [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
+def MAD16rir :
+  NVPTXInst<(outs Int16Regs:$dst),
+            (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
+            "mad.lo.s16 \t$dst, $a, $b, $c;",
+            [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
+def MAD16rii :
+  NVPTXInst<(outs Int16Regs:$dst),
+            (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
+            "mad.lo.s16 \t$dst, $a, $b, $c;",
+            [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD32rrr :
+  NVPTXInst<(outs Int32Regs:$dst),
+            (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
+            "mad.lo.s32 \t$dst, $a, $b, $c;",
+            [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
+def MAD32rri :
+  NVPTXInst<(outs Int32Regs:$dst),
+            (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
+            "mad.lo.s32 \t$dst, $a, $b, $c;",
+            [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
+def MAD32rir :
+  NVPTXInst<(outs Int32Regs:$dst),
+            (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
+            "mad.lo.s32 \t$dst, $a, $b, $c;",
+            [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
+def MAD32rii :
+  NVPTXInst<(outs Int32Regs:$dst),
+            (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
+            "mad.lo.s32 \t$dst, $a, $b, $c;",
+            [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD64rrr :
+  NVPTXInst<(outs Int64Regs:$dst),
+            (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
+            "mad.lo.s64 \t$dst, $a, $b, $c;",
+            [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
+def MAD64rri :
+  NVPTXInst<(outs Int64Regs:$dst),
+            (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
+            "mad.lo.s64 \t$dst, $a, $b, $c;",
+            [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
+def MAD64rir :
+  NVPTXInst<(outs Int64Regs:$dst),
+            (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
+            "mad.lo.s64 \t$dst, $a, $b, $c;",
+            [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
+def MAD64rii :
+  NVPTXInst<(outs Int64Regs:$dst),
+            (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
+            "mad.lo.s64 \t$dst, $a, $b, $c;",
+            [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
+
+// Integer negation.
+def INEG16 :
+  NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+            "neg.s16 \t$dst, $src;",
+            [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
+def INEG32 :
+  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+            "neg.s32 \t$dst, $src;",
+            [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
+def INEG64 :
+  NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+            "neg.s64 \t$dst, $src;",
+            [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
+
+//-----------------------------------
+// Floating Point Arithmetic
+//-----------------------------------
+
+// Constant 1.0f
+// Matched by exact semantics so only a true single-precision 1.0 qualifies.
+def FloatConst1 : PatLeaf<(fpimm), [{
+  return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
+         N->getValueAPF().convertToFloat() == 1.0f;
+}]>;
+// Constant 1.0 (double)
+def DoubleConst1 : PatLeaf<(fpimm), [{
+  return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
+         N->getValueAPF().convertToDouble() == 1.0;
+}]>;
+
+// Loads FP16 constant into a register.
+//
+// ptxas does not have hex representation for fp16, so we can't use
+// fp16 immediate values in .f16 instructions. Instead we have to load
+// the constant into a register using mov.b16.
+def LOAD_CONST_F16 :
+  NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
+            "mov.b16 \t$dst, $a;", []>;
+
+defm FADD : F3_fma_component<"add", fadd>;
+defm FSUB : F3_fma_component<"sub", fsub>;
+defm FMUL : F3_fma_component<"mul", fmul>;
+
+defm FMIN : F3<"min", fminnum>;
+defm FMAX : F3<"max", fmaxnum>;
+
+defm FABS : F2<"abs", fabs>;
+defm FNEG : F2<"neg", fneg>;
+// sqrt with round-to-nearest-even.
+defm FSQRT : F2<"sqrt.rn", fsqrt>;
+
+//
+// F64 division
+//
+// fdiv(1.0, b) is selected as a plain reciprocal (rcp).
+def FDIV641r :
+  NVPTXInst<(outs Float64Regs:$dst),
+            (ins f64imm:$a, Float64Regs:$b),
+            "rcp.rn.f64 \t$dst, $b;",
+            [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
+def FDIV64rr :
+  NVPTXInst<(outs Float64Regs:$dst),
+            (ins Float64Regs:$a, Float64Regs:$b),
+            "div.rn.f64 \t$dst, $a, $b;",
+            [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
+def FDIV64ri :
+  NVPTXInst<(outs Float64Regs:$dst),
+            (ins Float64Regs:$a, f64imm:$b),
+            "div.rn.f64 \t$dst, $a, $b;",
+            [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
+
+//
+// F32 Approximate reciprocal
+//
+def FDIV321r_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins f32imm:$a, Float32Regs:$b),
+            "rcp.approx.ftz.f32 \t$dst, $b;",
+            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV321r :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins f32imm:$a, Float32Regs:$b),
+            "rcp.approx.f32 \t$dst, $b;",
+            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Approximate division
+//
+def FDIV32approxrr_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, Float32Regs:$b),
+            "div.approx.ftz.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxri_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, f32imm:$b),
+            "div.approx.ftz.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxrr :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, Float32Regs:$b),
+            "div.approx.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_APPROX]>;
+def FDIV32approxri :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, f32imm:$b),
+            "div.approx.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Semi-accurate reciprocal
+//
+// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
+// (Same rcp.approx encodings as above, but gated on do_DIVF32_FULL.)
+//
+def FDIV321r_approx_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins f32imm:$a, Float32Regs:$b),
+            "rcp.approx.ftz.f32 \t$dst, $b;",
+            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV321r_approx :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins f32imm:$a, Float32Regs:$b),
+            "rcp.approx.f32 \t$dst, $b;",
+            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_FULL]>;
+//
+// F32 Semi-accurate division
+//
+def FDIV32rr_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, Float32Regs:$b),
+            "div.full.ftz.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32ri_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, f32imm:$b),
+            "div.full.ftz.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32rr :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, Float32Regs:$b),
+            "div.full.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            Requires<[do_DIVF32_FULL]>;
+def FDIV32ri :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, f32imm:$b),
+            "div.full.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            Requires<[do_DIVF32_FULL]>;
+//
+// F32 Accurate reciprocal
+//
+// IEEE-rounded reciprocal; requires PTX 2.0.
+def FDIV321r_prec_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins f32imm:$a, Float32Regs:$b),
+            "rcp.rn.ftz.f32 \t$dst, $b;",
+            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            Requires<[reqPTX20, doF32FTZ]>;
+def FDIV321r_prec :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins f32imm:$a, Float32Regs:$b),
+            "rcp.rn.f32 \t$dst, $b;",
+            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            Requires<[reqPTX20]>;
+//
+// F32 Accurate division
+//
+def FDIV32rr_prec_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, Float32Regs:$b),
+            "div.rn.ftz.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32ri_prec_ftz :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, f32imm:$b),
+            "div.rn.ftz.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32rr_prec :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, Float32Regs:$b),
+            "div.rn.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            Requires<[reqPTX20]>;
+def FDIV32ri_prec :
+  NVPTXInst<(outs Float32Regs:$dst),
+            (ins Float32Regs:$a, f32imm:$b),
+            "div.rn.f32 \t$dst, $a, $b;",
+            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            Requires<[reqPTX20]>;
+
+//
+// FMA
+//
+
+// Fused multiply-add. The multiclass stamps out all register/immediate
+// operand combinations (rrr/rri/rir/rii); the instruction string and
+// selection pattern are shared.
+multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+ def rir : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rii : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+}
+
+// f16 FMA has no immediate forms; it is additionally gated on useFP16Math.
+multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[useFP16Math, Pred]>;
+}
+
+// "true" here is presumably an always-satisfied Predicate record defined
+// elsewhere in this backend, used where no extra feature gate is needed.
+defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
+defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;
+defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
+defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
+defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
+defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
+defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
+
+// sin/cos
+// Only the low-precision "approx" variants exist in PTX, so fsin/fcos are
+// selected only when unsafe FP math is allowed.
+def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "sin.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "cos.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+
+// Lower (frem x, y) into (sub x, (mul (trunc (div x, y)) y)),
+// i.e. "poor man's fmod()".
+//
+// The quotient must be truncated toward zero (cvt.rzi): LLVM's frem has the
+// semantics of C fmod() and takes the sign of the dividend. Rounding the
+// quotient toward -infinity (cvt.rmi, i.e. floor) gives a remainder with the
+// sign of the *divisor* whenever x and y have opposite signs, e.g.
+// frem(5.5, -2.0) is 1.5 but the floor-based expansion yields -0.5.
+
+// frem - f32 FTZ
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
+ (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRZI_FTZ),
+ Float32Regs:$y))>,
+ Requires<[doF32FTZ]>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
+ (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRZI_FTZ),
+ fpimm:$y))>,
+ Requires<[doF32FTZ]>;
+
+// frem - f32
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
+ (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRZI),
+ Float32Regs:$y))>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
+ (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRZI),
+ fpimm:$y))>;
+
+// frem - f64
+def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
+ (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRZI),
+ Float64Regs:$y))>;
+def : Pat<(frem Float64Regs:$x, fpimm:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
+ (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRZI),
+ fpimm:$y))>;
+
+//-----------------------------------
+// Bitwise operations
+//-----------------------------------
+
+// Template for two-operand bitwise operations. Creates .b16, .b32, .b64, and
+// .pred (predicate registers -- i.e., i1) versions of OpcStr, each in
+// reg/reg and reg/imm forms.
+multiclass BITWISE<string OpcStr, SDNode OpNode> {
+ def b1rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def b1ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
+ def b16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def b16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def b32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def b32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def b64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def b64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+}
+
+defm OR : BITWISE<"or", or>;
+defm AND : BITWISE<"and", and>;
+defm XOR : BITWISE<"xor", xor>;
+
+// Bitwise NOT, one def per width (no imm form: not of a constant folds away).
+def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
+ "not.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
+def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "not.b16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
+def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "not.b32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
+def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "not.b64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
+
+// Template for left/right shifts. Takes three operands,
+// [dest (reg), src (reg), shift (reg or imm)].
+// dest and src may be int64, int32, or int16, but shift is always int32.
+// (PTX shl/shr take a u32 shift amount regardless of operand width.)
+//
+// This template also defines a 32-bit shift (imm, imm) instruction.
+multiclass SHIFT<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
+ def i32ii :
+ NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
+}
+
+// shl is typeless (.b); shr is signed (.s, arithmetic) or unsigned (.u,
+// logical).
+defm SHL : SHIFT<"shl.b", shl>;
+defm SRA : SHIFT<"shr.s", sra>;
+defm SRL : SHIFT<"shr.u", srl>;
+
+// Bit-reverse
+def BREV32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
+ "brev.b32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
+def BREV64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
+ "brev.b64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
+
+//
+// Rotate: Use ptx shf instruction if available.
+//
+
+// 32 bit r2 = rotl r1, n
+// =>
+// r2 = shf.l r1, r1, n
+// (funnel-shifting a value with itself is a rotate)
+def ROTL32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTL32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32 bit r2 = rotr r1, n
+// =>
+// r2 = shf.r r1, r1, n
+def ROTR32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTR32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
+// The shl/shr results have disjoint bits, so add acts as bitwise-or here.
+def ROT32imm_sw :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ "shl.b32 \t%lhs, $src, $amt1;\n\t"
+ "shr.b32 \t%rhs, $src, $amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+// Transform an immediate rotate amount n into 32 - n (the complementary
+// shift for ROT32imm_sw).
+def SUB_FRM_32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
+ Requires<[noHWROT32]>;
+def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
+ Requires<[noHWROT32]>;
+
+// 32-bit software rotate left by register.
+def ROTL32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shl.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shr.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 32-bit software rotate right by register.
+def ROTR32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shr.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shl.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
+// There is no 64-bit shf, so the shift-pair expansion is used even on
+// targets with hasHWROT32.
+def ROT64imm_sw :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ "shl.b64 \t%lhs, $src, $amt1;\n\t"
+ "shr.b64 \t%rhs, $src, $amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+def SUB_FRM_64 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
+def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
+
+// 64-bit software rotate left by register.
+def ROTL64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shl.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shr.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
+
+// 64-bit software rotate right by register.
+def ROTR64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shr.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shl.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+
+//
+// Funnel shift in clamp mode
+//
+
+// Create SDNodes so they can be used in the DAG code, e.g.
+// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
+def SDTIntShiftDOp :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisInt<0>, SDTCisInt<3>]>;
+def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
+def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
+
+// shf.{l,r}.clamp: shift the 64-bit concatenation of $hi:$lo by $amt
+// (clamped at 32) and keep one 32-bit half of the result.
+def FUNSHFLCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+
+def FUNSHFRCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+
+//
+// BFE - bit-field extract
+//
+
+// Template for BFE instructions. Takes four args,
+// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
+// Start may be an imm only if end is also an imm. FIXME: Is this a
+// restriction in PTX?
+//
+// dest and src may be int32 or int64, but start and end are always int32.
+// No selection patterns here -- these are selected manually (e.g. from
+// intrinsics or ISel code elsewhere in the backend).
+multiclass BFE<string TyStr, RegisterClass RC> {
+ def rrr
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rri
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rii
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, i32imm:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+}
+
+let hasSideEffects = 0 in {
+ defm BFE_S32 : BFE<"s32", Int32Regs>;
+ defm BFE_U32 : BFE<"u32", Int32Regs>;
+ defm BFE_S64 : BFE<"s64", Int64Regs>;
+ defm BFE_U64 : BFE<"u64", Int64Regs>;
+}
+
+//-----------------------------------
+// Comparison instructions (setp, set)
+//-----------------------------------
+
+// FIXME: This doesn't cover versions of set and setp that combine with a
+// boolean predicate, e.g. setp.eq.and.b16.
+
+// setp: compare and write a predicate (i1) register. The comparison mode
+// (eq/ne/lt/... and optional .ftz) is carried in the CmpMode operand and
+// printed via its ${cmp:...} sub-fields; patterns are attached separately
+// (see ISET_FORMAT / FSET_FORMAT below).
+let hasSideEffects = 0 in {
+ multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ir :
+ NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
+defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
+defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
+defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
+defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
+defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
+defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
+defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
+defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
+defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
+defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
+def SETP_f16rr :
+ NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
+ []>, Requires<[useFP16Math]>;
+
+// f16x2 compare produces two predicates, one per lane.
+def SETP_f16x2rr :
+ NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
+ []>,
+ Requires<[useFP16Math]>;
+
+
+// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
+// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
+// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
+
+// set: compare and write 0 / 0xFFFFFFFF into a 32-bit register.
+let hasSideEffects = 0 in {
+ multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
+defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
+defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
+defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
+defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
+defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
+defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
+defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
+defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
+defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
+defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
+defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
+
+//-----------------------------------
+// Selection instructions (selp)
+//-----------------------------------
+
+// FIXME: Missing slct
+
+// selp.TYPE d, a, b, p computes d = p ? a : b.
+
+// selp instructions that don't have any pattern matches; we explicitly use
+// them within this file.
+let hasSideEffects = 0 in {
+ multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ir : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ii : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ }
+
+ // Same instructions, but with ISel patterns for the `select` node.
+ multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
+ SDNode ImmNode> {
+ def rr :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
+ def ri :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
+ def ir :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
+ def ii :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+ }
+}
+
+// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
+// good.
+defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
+defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
+defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
+defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
+defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
+defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
+defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
+defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
+defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
+defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
+defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
+defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
+
+// f16x2 select: both lanes pick the same source, so a single selp.b32 on the
+// packed register suffices.
+def SELP_f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Float16x2Regs:$dst,
+ (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
+
+//-----------------------------------
+// Data Movement (Load / Store, Move)
+//-----------------------------------
+
+// reg+imm addressing modes, selected in NVPTXISelDAGToDAG.
+def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
+ [SDNPWantRoot]>;
+def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
+ [SDNPWantRoot]>;
+
+def MEMri : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int32Regs, i32imm);
+}
+def MEMri64 : Operand<i64> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int64Regs, i64imm);
+}
+
+def imem : Operand<iPTR> {
+ let PrintMethod = "printOperand";
+}
+
+def imemAny : Operand<iPTRAny> {
+ let PrintMethod = "printOperand";
+}
+
+def LdStCode : Operand<i32> {
+ let PrintMethod = "printLdStCode";
+}
+
+def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
+
+// Load a memory address into a u32 or u64 register.
+def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
+ "mov.u32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
+ "mov.u64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+
+// Get pointer to local stack.
+// __local_depot$num is the per-function local stack symbol emitted by the
+// asm printer; $num is the function number.
+let hasSideEffects = 0 in {
+ def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
+ "mov.u32 \t$d, __local_depot$num;", []>;
+ def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
+ "mov.u64 \t$d, __local_depot$num;", []>;
+}
+
+
+// copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
+let IsSimpleMove=1, hasSideEffects=0 in {
+ def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
+ "mov.pred \t$dst, $sss;", []>;
+ def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
+ "mov.u16 \t$dst, $sss;", []>;
+ def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
+ "mov.u32 \t$dst, $sss;", []>;
+ def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
+ "mov.u64 \t$dst, $sss;", []>;
+
+ def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
+ // We have to use .b16 here as there's no mov.f16.
+ "mov.b16 \t$dst, $src;", []>;
+ def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "mov.f32 \t$dst, $src;", []>;
+ def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
+ "mov.f64 \t$dst, $src;", []>;
+}
+
+// Immediate moves.
+// NOTE(review): IMOV64i breaks the *ri naming convention of its siblings
+// (IMOV16ri/IMOV32ri); renaming would require updating external references.
+def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
+ "mov.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, imm:$src)]>;
+def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
+ "mov.u16 \t$dst, $src;",
+ [(set Int16Regs:$dst, imm:$src)]>;
+def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
+ "mov.u32 \t$dst, $src;",
+ [(set Int32Regs:$dst, imm:$src)]>;
+def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
+ "mov.u64 \t$dst, $src;",
+ [(set Int64Regs:$dst, imm:$src)]>;
+
+def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
+ "mov.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, fpimm:$src)]>;
+def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
+ "mov.f64 \t$dst, $src;",
+ [(set Float64Regs:$dst, fpimm:$src)]>;
+
+def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
+
+//---- Copy Frame Index ----
+// Materialize a frame-index address as base+offset (${addr:add} prints the
+// reg, imm pair as an add).
+def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
+ "add.u32 \t$dst, ${addr:add};",
+ [(set Int32Regs:$dst, ADDRri:$addr)]>;
+def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
+ "add.u64 \t$dst, ${addr:add};",
+ [(set Int64Regs:$dst, ADDRri64:$addr)]>;
+
+//-----------------------------------
+// Comparison and Selection
+//-----------------------------------
+
+// Attach ISel patterns for one integer comparison OpNode (in one signedness)
+// to the pattern-less SETP_* (-> i1) and SET_* (-> i32) instructions defined
+// above, across all widths and reg/imm operand combinations. Mode is the
+// CmpMode operand value encoding the comparison.
+multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
+ Instruction setp_16rr,
+ Instruction setp_16ri,
+ Instruction setp_16ir,
+ Instruction setp_32rr,
+ Instruction setp_32ri,
+ Instruction setp_32ir,
+ Instruction setp_64rr,
+ Instruction setp_64ri,
+ Instruction setp_64ir,
+ Instruction set_16rr,
+ Instruction set_16ri,
+ Instruction set_16ir,
+ Instruction set_32rr,
+ Instruction set_32ri,
+ Instruction set_32ir,
+ Instruction set_64rr,
+ Instruction set_64ri,
+ Instruction set_64ir> {
+ // i16 -> pred
+ def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
+ (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
+ (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> pred
+ def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
+ (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
+ (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> pred
+ def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
+ (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
+ (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
+
+ // i16 -> i32
+ def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
+ (set_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
+ (set_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> i32
+ def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
+ (set_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
+ (set_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> i32
+ def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
+ (set_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
+ (set_64ir imm:$a, Int64Regs:$b, Mode)>;
+}
+
+multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_s16rr, SETP_s16ri, SETP_s16ir,
+ SETP_s32rr, SETP_s32ri, SETP_s32ir,
+ SETP_s64rr, SETP_s64ri, SETP_s64ir,
+ SET_s16rr, SET_s16ri, SET_s16ir,
+ SET_s32rr, SET_s32ri, SET_s32ir,
+ SET_s64rr, SET_s64ri, SET_s64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_u16rr, SETP_u16ri, SETP_u16ir,
+ SETP_u32rr, SETP_u32ri, SETP_u32ir,
+ SETP_u64rr, SETP_u64ri, SETP_u64ir,
+ SET_u16rr, SET_u16ri, SET_u16ir,
+ SET_u32rr, SET_u32ri, SET_u32ir,
+ SET_u64rr, SET_u64ri, SET_u64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+// For integers, the unordered forms (setueq etc.) are equivalent to the
+// ordered ones, so both map onto the same CmpMode.
+defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
+defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
+defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
+defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
+defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
+defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
+defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
+defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
+defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
+defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
+defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
+defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
+
+// i1 compares. ne is xor; eq is not-xor. For i1, ordered and unordered
+// forms are identical.
+def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+
+def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+// i1 compare -> i32. selp d, a, b, p yields d = p ? a : b, and the xor
+// predicate is true iff $a != $b; selecting -1/0 implements setne and 0/-1
+// implements seteq. The seteq pattern below was previously keyed on setne,
+// which made it dead (shadowed by the identical source pattern above) and,
+// had it ever matched, an inverted comparison.
+def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+
+
+multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
+ // f16 -> pred
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> pred
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> pred
+ def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+
+ // f16 -> i32
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> i32
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> i32
+ def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+}
+
+// Instantiations of the FSET_FORMAT multiclass above: one per FP condition
+// code, pairing a DAG condition (e.g. setogt) with the corresponding PTX
+// comparison-mode operand in its normal and flush-to-zero (FTZ) forms.
+
+// Ordered comparisons (seto* condition codes).
+defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
+defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
+defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
+defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
+defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
+defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
+
+// Unordered comparisons (setu* condition codes) map to the *U PTX modes.
+defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
+defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
+defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
+defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
+defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
+defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
+
+// "Don't care about NaN" condition codes: lowered to the same PTX modes as
+// the ordered comparisons above.
+defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
+defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
+defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
+defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
+defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
+defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
+
+// Orderedness tests: seto (neither operand NaN) -> CmpNUM,
+// setuo (either operand NaN) -> CmpNAN.
+defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
+defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
+
+// FIXME: What is this doing here? Can it be deleted?
+// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
+// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+// Operand/result type profiles for the NVPTXISD call-lowering nodes defined
+// below.  SDTypeProfile<NumResults, NumOperands, [constraints]>; SDTCisInt<N>
+// constrains result/operand N (results first, then operands) to be integer.
+def SDTDeclareParamProfile :
+  SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTDeclareScalarParamProfile :
+  SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+// One loaded result; operands 1 and 2 are integer (param index / offset —
+// see the instruction patterns below).
+def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
+// Two results of the same type, plus two integer operands.
+def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
+def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
+def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
+def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
+def SDTCallValProfile : SDTypeProfile<1, 0, []>;
+def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
+def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
+def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
+def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
+
+// SelectionDAG nodes emitted by NVPTX call lowering.  The PTX call sequence
+// is printed piecewise (param declarations, param stores, the call, retval
+// loads), so nearly every node carries chain + in/out glue and is marked
+// SDNPSideEffect to keep the pieces in order.
+def DeclareParam :
+  SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareScalarParam :
+  SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRetParam :
+  SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRet :
+  SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+// Param loads may read memory (SDNPMayLoad) but have no SDNPSideEffect —
+// they are ordered by chain/glue alone.
+def LoadParam :
+  SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
+         [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV2 :
+  SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
+         [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV4 :
+  SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
+         [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def PrintCall :
+  SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCall :
+  SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintCallUni :
+  SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCallUni :
+  SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParam :
+  SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV2 :
+  SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV4 :
+  SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamU32 :
+  SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamS32 :
+  SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgBegin :
+  SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArg :
+  SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LastCallArg :
+  SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgEnd :
+  SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVoid :
+  SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def Prototype :
+  SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVal :
+  SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+// MoveParam is a pure value move: no chain, glue, or side effects.
+def MoveParam :
+  SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
+// Retval stores need only chain ordering plus SDNPSideEffect (no glue).
+def StoreRetval :
+  SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
+         [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV2 :
+  SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
+         [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV4 :
+  SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
+         [SDNPHasChain, SDNPSideEffect]>;
+def PseudoUseParam :
+  SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
+         [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def RETURNNode :
+  SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
+         [SDNPHasChain, SDNPSideEffect]>;
+
+// Instruction templates that read a call's return value out of the retval0
+// param space ("ld.param<opstr> $dst, [retval0+$b];").  The $b immediate is
+// the byte offset into retval0; scalar, v2 and v4 variants differ only in
+// the number of destination registers.
+let mayLoad = 1 in {
+  class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+              !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
+              []>;
+
+  class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
+              !strconcat("ld.param.v2", opstr,
+                         " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
+
+  class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
+                    regclass:$dst4),
+              (ins i32imm:$b),
+              !strconcat("ld.param.v4", opstr,
+                         " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
+              []>;
+}
+
+// Register-form variant: selected for a LoadParam DAG node with first
+// operand 0, printed as a plain "mov<opstr> $dst, retval$b;".
+class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
+  NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+            !strconcat("mov", opstr, " \t$dst, retval$b;"),
+            [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
+
+// Instruction templates for writing outgoing call arguments into param space
+// ("st.param<opstr> [param$a+$b], ...") and for a callee writing its return
+// value ("st.param<opstr> [func_retval0+$a], ...").  $a selects the param /
+// retval offset, $b the byte offset within it; v2/v4 variants store 2 or 4
+// registers at once.
+let mayStore = 1 in {
+  class StoreParamInst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+              !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
+              []>;
+
+  class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
+                           i32imm:$a, i32imm:$b),
+              !strconcat("st.param.v2", opstr,
+                         " \t[param$a+$b], {{$val, $val2}};"),
+              []>;
+
+  class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
+                           regclass:$val4, i32imm:$a,
+                           i32imm:$b),
+              !strconcat("st.param.v4", opstr,
+                         " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
+              []>;
+
+  class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
+              !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
+              []>;
+
+  class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
+              !strconcat("st.param.v2", opstr,
+                         " \t[func_retval0+$a], {{$val, $val2}};"),
+              []>;
+
+  class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
+    NVPTXInst<(outs),
+              (ins regclass:$val, regclass:$val2, regclass:$val3,
+                   regclass:$val4, i32imm:$a),
+              !strconcat("st.param.v4", opstr,
+                         " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
+              []>;
+}
+
+// Pseudo-instructions that print the head of a PTX call statement.  The
+// PrintCall/PrintCallUni SDNode's i32 operand (0..8) selects how many
+// retvalN temporaries appear in the printed "(retval0, ...)" result list;
+// the rest of the call statement is emitted by the CallVoid/CallArg*
+// instructions below.
+let isCall=1 in {
+  multiclass CALL<string OpcStr, SDNode OpNode> {
+    def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
+    def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
+    def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
+    def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
+    def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
+      [(OpNode (i32 4))]>;
+    def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
+      [(OpNode (i32 5))]>;
+    def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+                         "retval5), "),
+      [(OpNode (i32 6))]>;
+    def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+                         "retval5, retval6), "),
+      [(OpNode (i32 7))]>;
+    def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
+      !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+                         "retval5, retval6, retval7), "),
+      [(OpNode (i32 8))]>;
+  }
+}
+
+defm Call : CALL<"call", PrintCall>;
+defm CallUni : CALL<"call.uni", PrintCallUni>;
+
+// Convergent call instructions. These are identical to regular calls, except
+// they have the isConvergent bit set.
+let isConvergent=1 in {
+  defm ConvergentCall : CALL<"call", PrintConvergentCall>;
+  defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
+}
+
+// Concrete ld.param instructions for reading call results, one per register
+// class / PTX type suffix.  Note: i8 values live in 16-bit registers
+// (Int16Regs) but use the ".b8" qualifier, and f16/f16x2 use untyped
+// ".b16"/".b32" accesses while f32/f64 use typed ".f32"/".f64".
+def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
+def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
+def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
+def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
+def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
+def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
+def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
+def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
+// No v4 forms for 64-bit element types below (v4 of i64/f64 is not defined
+// here; presumably split earlier — confirm in the lowering code).
+def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
+def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
+def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
+def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
+def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
+def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
+def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
+def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
+def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
+def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
+def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
+def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
+
+// Concrete st.param instructions for writing outgoing arguments; the same
+// type/register-class conventions as the loads above.
+def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
+def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
+
+def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
+def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
+def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
+def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
+def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
+def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
+
+def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
+def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
+def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
+
+def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
+def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
+def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
+def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
+def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
+def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
+def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
+def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
+def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
+def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
+def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
+
+// Concrete st.param instructions for a callee storing its return value
+// (targets func_retval0; see StoreRetvalInst above).
+def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
+def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
+def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
+def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
+def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
+def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
+def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
+def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
+def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
+def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
+def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
+
+def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
+def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
+def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
+def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
+def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
+def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
+def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
+def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
+def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
+def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
+def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
+
+// Pieces of the printed PTX call argument list.  CallArgBeginInst emits the
+// opening "(", each CallArg* emits "arg, " (LastCallArg* omits the trailing
+// comma), and CallArgEndInst0/1 close the list with ")" or ");".
+def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
+def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
+def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
+def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
+
+// Register argument: the CallArg node's first operand distinguishes
+// register (0) from param (1) arguments — see CallArgParam below.
+class CallArgInst<NVPTXRegClass regclass> :
+  NVPTXInst<(outs), (ins regclass:$a), "$a, ",
+            [(CallArg (i32 0), regclass:$a)]>;
+
+class LastCallArgInst<NVPTXRegClass regclass> :
+  NVPTXInst<(outs), (ins regclass:$a), "$a",
+            [(LastCallArg (i32 0), regclass:$a)]>;
+
+def CallArgI64 : CallArgInst<Int64Regs>;
+def CallArgI32 : CallArgInst<Int32Regs>;
+def CallArgI16 : CallArgInst<Int16Regs>;
+def CallArgF64 : CallArgInst<Float64Regs>;
+def CallArgF32 : CallArgInst<Float32Regs>;
+
+def LastCallArgI64 : LastCallArgInst<Int64Regs>;
+def LastCallArgI32 : LastCallArgInst<Int32Regs>;
+def LastCallArgI16 : LastCallArgInst<Int16Regs>;
+def LastCallArgF64 : LastCallArgInst<Float64Regs>;
+def LastCallArgF32 : LastCallArgInst<Float32Regs>;
+
+// Immediate arguments.
+def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
+                              [(CallArg (i32 0), (i32 imm:$a))]>;
+def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
+                                  [(LastCallArg (i32 0), (i32 imm:$a))]>;
+
+// Param-space arguments ("param$a"): selected when the CallArg node's first
+// operand is 1.
+def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
+                             [(CallArg (i32 1), (i32 imm:$a))]>;
+def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
+                                 [(LastCallArg (i32 1), (i32 imm:$a))]>;
+
+// The callee of the printed call: a global symbol, or an indirect call
+// through a 32- or 64-bit register.
+def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
+                             [(CallVoid (Wrapper tglobaladdr:$addr))]>;
+def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
+                                [(CallVoid Int32Regs:$addr)]>;
+def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
+                                  [(CallVoid Int64Regs:$addr)]>;
+def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
+                              [(Prototype (i32 imm:$val))]>;
+
+// Printed .param/.reg declarations that precede a call.  The DeclareRet
+// node's first operand selects the form: 1 -> ".param .b<size>" scalar,
+// 2 -> ".reg .b<size>"; the aggregate form carries an explicit alignment.
+def DeclareRetMemInst :
+  NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
+            ".param .align $align .b8 retval$num[$size];",
+            [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetScalarInst :
+  NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+            ".param .b$size retval$num;",
+            [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetRegInst :
+  NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+            ".reg .b$size retval$num;",
+            [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
+
+// Same idea for outgoing parameters: the DeclareScalarParam node's last
+// operand selects .param (0) vs .reg (1).
+def DeclareParamInst :
+  NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
+            ".param .align $align .b8 param$a[$size];",
+            [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
+def DeclareScalarParamInst :
+  NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+            ".param .b$size param$a;",
+            [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
+def DeclareScalarRegInst :
+  NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+            ".reg .b$size param$a;",
+            [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
+
+// MoveParam: copy a formal parameter into a register of the same class via
+// a plain "mov<asmstr>".
+class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
+  NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
+            !strconcat("mov", asmstr, " \t$dst, $src;"),
+            [(set regclass:$dst, (MoveParam regclass:$src))]>;
+
+def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
+def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
+// i16 is special-cased: it uses cvt.u16.u32 rather than a mov.
+// NOTE(review): the source operand here is an Int16Regs value fed to a
+// .u32-sourced cvt — confirm this matches how i16 params are materialized.
+def MoveParamI16 :
+  NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+            "cvt.u16.u32 \t$dst, $src;",
+            [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
+def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
+def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
+def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;
+
+// PseudoUseParam emits only a comment; it keeps a param value alive without
+// generating a real PTX instruction.
+class PseudoUseParamInst<NVPTXRegClass regclass> :
+  NVPTXInst<(outs), (ins regclass:$src),
+            "// Pseudo use of $src",
+            [(PseudoUseParam regclass:$src)]>;
+
+def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
+def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
+def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
+def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
+def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
+
+
+//
+// Load / Store Handling
+//
+// Scalar ld instructions, with no selection patterns (they are created by
+// the load/store lowering code).  The LdStCode operands are printed via
+// custom modifiers (volatile/addsp/vec/sign); the address variants are:
+//   _avar    direct symbol          _areg/_areg_64  32/64-bit reg
+//   _ari/_ari_64  reg + imm offset  _asi            symbol + imm offset
+multiclass LD<NVPTXRegClass regclass> {
+  def _avar : NVPTXInst<
+    (outs regclass:$dst),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, imem:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t$dst, [$addr];", []>;
+  def _areg : NVPTXInst<
+    (outs regclass:$dst),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t$dst, [$addr];", []>;
+  def _areg_64 : NVPTXInst<
+    (outs regclass:$dst),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int64Regs:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t$dst, [$addr];", []>;
+  def _ari : NVPTXInst<
+    (outs regclass:$dst),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t$dst, [$addr+$offset];", []>;
+  def _ari_64 : NVPTXInst<
+    (outs regclass:$dst),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+     LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t$dst, [$addr+$offset];", []>;
+  def _asi : NVPTXInst<
+    (outs regclass:$dst),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+     LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t$dst, [$addr+$offset];", []>;
+}
+
+// One LD family per register class; i8 shares Int16Regs with i16.
+let mayLoad=1, hasSideEffects=0 in {
+  defm LD_i8 : LD<Int16Regs>;
+  defm LD_i16 : LD<Int16Regs>;
+  defm LD_i32 : LD<Int32Regs>;
+  defm LD_i64 : LD<Int64Regs>;
+  defm LD_f16 : LD<Float16Regs>;
+  defm LD_f16x2 : LD<Float16x2Regs>;
+  defm LD_f32 : LD<Float32Regs>;
+  defm LD_f64 : LD<Float64Regs>;
+}
+
+// Scalar st instructions, mirroring the LD multiclass above (same operand
+// encoding and addressing variants, with $src stored instead of $dst
+// loaded).
+multiclass ST<NVPTXRegClass regclass> {
+  def _avar : NVPTXInst<
+    (outs),
+    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+     LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+    " \t[$addr], $src;", []>;
+  def _areg : NVPTXInst<
+    (outs),
+    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
+     LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+    " \t[$addr], $src;", []>;
+  def _areg_64 : NVPTXInst<
+    (outs),
+    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+     LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+    " \t[$addr], $src;", []>;
+  def _ari : NVPTXInst<
+    (outs),
+    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+     LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+    " \t[$addr+$offset], $src;", []>;
+  def _ari_64 : NVPTXInst<
+    (outs),
+    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+     LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+    " \t[$addr+$offset], $src;", []>;
+  def _asi : NVPTXInst<
+    (outs),
+    (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+     LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+    " \t[$addr+$offset], $src;", []>;
+}
+
+// One ST family per register class; i8 shares Int16Regs with i16.
+let mayStore=1, hasSideEffects=0 in {
+  defm ST_i8 : ST<Int16Regs>;
+  defm ST_i16 : ST<Int16Regs>;
+  defm ST_i32 : ST<Int32Regs>;
+  defm ST_i64 : ST<Int64Regs>;
+  defm ST_f16 : ST<Float16Regs>;
+  defm ST_f16x2 : ST<Float16x2Regs>;
+  defm ST_f32 : ST<Float32Regs>;
+  defm ST_f64 : ST<Float64Regs>;
+}
+
+// The following is used only in and after vector elementizations. Vector
+// elementization happens at the machine instruction level, so the following
+// instructions never appear in the DAG.
+// Vector (ld.v2/ld.v4) loads with the same addressing variants as LD above;
+// _v2_* produce two results, _v4_* produce four.
+multiclass LD_VEC<NVPTXRegClass regclass> {
+  def _v2_avar : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, imem:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2}}, [$addr];", []>;
+  def _v2_areg : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2}}, [$addr];", []>;
+  def _v2_areg_64 : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int64Regs:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2}}, [$addr];", []>;
+  def _v2_ari : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+  def _v2_ari_64 : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+  def _v2_asi : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+  def _v4_avar : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, imem:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+  def _v4_areg : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+  def _v4_areg_64 : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int64Regs:$addr),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+  def _v4_ari : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+  def _v4_ari_64 : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+  def _v4_asi : NVPTXInst<
+    (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+    (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+    "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+}
+let mayLoad=1, hasSideEffects=0 in {
+  defm LDV_i8 : LD_VEC<Int16Regs>;
+  defm LDV_i16 : LD_VEC<Int16Regs>;
+  defm LDV_i32 : LD_VEC<Int32Regs>;
+  defm LDV_i64 : LD_VEC<Int64Regs>;
+  defm LDV_f16 : LD_VEC<Float16Regs>;
+  defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
+  defm LDV_f32 : LD_VEC<Float32Regs>;
+  defm LDV_f64 : LD_VEC<Float64Regs>;
+}
+
+// Vector (st.v2/st.v4) stores, mirroring LD_VEC above; like those, these are
+// produced by vector elementization and never appear in the DAG.
+multiclass ST_VEC<NVPTXRegClass regclass> {
+  def _v2_avar : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr], {{$src1, $src2}};", []>;
+  def _v2_areg : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr], {{$src1, $src2}};", []>;
+  def _v2_areg_64 : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr], {{$src1, $src2}};", []>;
+  def _v2_ari : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
+         i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr+$offset], {{$src1, $src2}};", []>;
+  def _v2_ari_64 : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
+         i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr+$offset], {{$src1, $src2}};", []>;
+  def _v2_asi : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+         LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
+         i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr+$offset], {{$src1, $src2}};", []>;
+  def _v4_avar : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, imem:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+  def _v4_areg : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+  def _v4_areg_64 : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int64Regs:$addr),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+  def _v4_ari : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+  def _v4_ari_64 : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+    "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+  def _v4_asi : NVPTXInst<
+    (outs),
+    (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+         LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+         i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+    "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
+    "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+}
+
+let mayStore=1, hasSideEffects=0 in {
+  defm STV_i8 : ST_VEC<Int16Regs>;
+  defm STV_i16 : ST_VEC<Int16Regs>;
+  defm STV_i32 : ST_VEC<Int32Regs>;
+  defm STV_i64 : ST_VEC<Int64Regs>;
+  defm STV_f16 : ST_VEC<Float16Regs>;
+  defm STV_f16x2 : ST_VEC<Float16x2Regs>;
+  defm STV_f32 : ST_VEC<Float32Regs>;
+  defm STV_f64 : ST_VEC<Float64Regs>;
+}
+
+//---- Conversion ----
+
+class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
+ NVPTXRegClass regclassOut> :
+ NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
+ !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
+ [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
+
+def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
+def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
+def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
+def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
+def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
+def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
+def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
+def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
+
+// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
+// we cannot specify floating-point literals in isel patterns. Therefore, we
+// use an integer selp to select either 1 or 0 and then cvt to floating-point.
+
+// sint -> f16
+def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
+ (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
+ (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
+ (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
+ (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f16
+def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
+ (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
+ (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
+ (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
+ (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f32
+def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
+ (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
+ (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
+ (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
+ (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f32
+def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
+ (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
+ (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
+ (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
+ (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f64
+def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
+ (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
+ (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
+ (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
+ (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f64
+def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
+ (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
+ (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
+ (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
+ (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
+
+
+// f16 -> sint
+def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f16 -> uint
+def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f32 -> sint
+def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f32 -> uint
+def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f64 -> sint
+def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
+ (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
+ (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
+ (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
+
+// f64 -> uint
+def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
+ (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
+ (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
+ (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
+
+// sext i1
+def : Pat<(i16 (sext Int1Regs:$a)),
+ (SELP_s16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (sext Int1Regs:$a)),
+ (SELP_s32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (sext Int1Regs:$a)),
+ (SELP_s64ii -1, 0, Int1Regs:$a)>;
+
+// zext i1
+def : Pat<(i16 (zext Int1Regs:$a)),
+ (SELP_u16ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (zext Int1Regs:$a)),
+ (SELP_u32ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (zext Int1Regs:$a)),
+ (SELP_u64ii 1, 0, Int1Regs:$a)>;
+
+// anyext i1
+def : Pat<(i16 (anyext Int1Regs:$a)),
+ (SELP_u16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (anyext Int1Regs:$a)),
+ (SELP_u32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (anyext Int1Regs:$a)),
+ (SELP_u64ii -1, 0, Int1Regs:$a)>;
+
+// sext i16
+def : Pat<(i32 (sext Int16Regs:$a)),
+ (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (sext Int16Regs:$a)),
+ (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
+
+// zext i16
+def : Pat<(i32 (zext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (zext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// anyext i16
+def : Pat<(i32 (anyext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (anyext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// sext i32
+def : Pat<(i64 (sext Int32Regs:$a)),
+ (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
+
+// zext i32
+def : Pat<(i64 (zext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+// anyext i32
+def : Pat<(i64 (anyext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+
+// truncate i64
+def : Pat<(i32 (trunc Int64Regs:$a)),
+ (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i16 (trunc Int64Regs:$a)),
+ (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int64Regs:$a)),
+ (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i32
+def : Pat<(i16 (trunc Int32Regs:$a)),
+ (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int32Regs:$a)),
+ (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i16
+def : Pat<(i1 (trunc Int16Regs:$a)),
+ (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
+
+// sext_inreg
+def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
+
+
+// Select instructions with 32-bit predicates
+def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
+ (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
+ (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
+ (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
+ (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
+ (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
+ (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+
+
+let hasSideEffects = 0 in {
+ // pack a set of smaller int registers to a larger int register
+ def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2,
+ Int16Regs:$s3, Int16Regs:$s4),
+ "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
+ def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2),
+ "mov.b32 \t$d, {{$s1, $s2}};", []>;
+ def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int32Regs:$s1, Int32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+ def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
+ (ins Float32Regs:$s1, Float32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+
+ // unpack a larger int register to a set of smaller int registers
+ def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
+ Int16Regs:$d3, Int16Regs:$d4),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
+ def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
+ (ins Int32Regs:$s),
+ "mov.b32 \t{{$d1, $d2}}, $s;", []>;
+ def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+ def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
+ (ins Float64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+
+}
+
+let hasSideEffects = 0 in {
+ // Extract element of f16x2 register. PTX does not provide any way
+ // to access elements of f16x2 vector directly, so we need to
+ // extract it using a temporary register.
+ def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_hi;\n\t"
+ " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
+ def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_lo;\n\t"
+ " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 1))]>;
+
+ // Coalesce two f16 registers into f16x2
+ def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ "mov.b32 \t$dst, {{$a, $b}};",
+ [(set Float16x2Regs:$dst,
+ (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;
+
+ // Directly initializing underlying the b32 register is one less SASS
+ // instruction than than vector-packing move.
+ def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
+ "mov.b32 \t$dst, $src;",
+ []>;
+
+ // Split f16x2 into two f16 registers.
+ def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Float16x2Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+ // Split an i32 into two f16
+ def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Int32Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+}
+
+// Count leading zeros
+let hasSideEffects = 0 in {
+ def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "clz.b32 \t$d, $a;", []>;
+ def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "clz.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
+
+// The return type of the ctlz ISD node is the same as its input, but the PTX
+// ctz instruction always returns a 32-bit value. For ctlz.i64, convert the
+// ptx value to 64 bits to match the ISD node's semantics, unless we know we're
+// truncating back down to 32 bits.
+def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;
+
+// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
+// result back to 16-bits if necessary. We also need to subtract 16 because
+// the high-order 16 zeros were counted.
+//
+// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
+// use to save one SASS instruction (on sm_35 anyway):
+//
+// mov.b32 $tmp, {0xffff, $a}
+// ctlz.b32 $result, $tmp
+//
+// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
+// and then ctlz that value. This way we don't have to subtract 16 from the
+// result. Unfortunately today we don't have a way to generate
+// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
+def : Pat<(ctlz Int16Regs:$a),
+ (SUBi16ri (CVT_u16_u32
+ (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
+def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
+ (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
+
+// Population count
+let hasSideEffects = 0 in {
+ def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "popc.b32 \t$d, $a;", []>;
+ def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "popc.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
+
+// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
+// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
+// pattern that avoids the type conversion if we're truncating the result to
+// i32 anyway.
+def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;
+
+// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
+// If we know that we're storing into an i32, we can avoid the final trunc.
+def : Pat<(ctpop Int16Regs:$a),
+ (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
+def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
+ (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
+
+// fpround f32 -> f16
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
+
+// fpround f64 -> f16
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
+
+// fpround f64 -> f32
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
+
+// fpextend f16 -> f32
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f16 -> f64
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f32 -> f64
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
+
+def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+// fceil, ffloor, fround, ftrunc.
+
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
+
+def : Pat<(fround Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fround Float16Regs:$a)),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fround Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fround Float32Regs:$a)),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(f64 (fround Float64Regs:$a)),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
+
+// nearbyint and rint are implemented as rounding to nearest even. This isn't
+// strictly correct, because it causes us to ignore the rounding mode. But it
+// matches what CUDA's "libm" does.
+
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+
+//-----------------------------------
+// Control-flow
+//-----------------------------------
+
+let isTerminator=1 in {
+ let isReturn=1, isBarrier=1 in
+ def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
+
+ let isBranch=1 in
+ def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@$a bra \t$target;",
+ [(brcond Int1Regs:$a, bb:$target)]>;
+ let isBranch=1 in
+ def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@!$a bra \t$target;", []>;
+
+ let isBranch=1, isBarrier=1 in
+ def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
+ "bra.uni \t$target;", [(br bb:$target)]>;
+}
+
+def : Pat<(brcond Int32Regs:$a, bb:$target),
+ (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
+
+// SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
+// conditional branch if the target block is the next block so that the code
+// can fall through to the target block. The invertion is done by 'xor
+// condition, 1', which will be translated to (setne condition, -1). Since ptx
+// supports '@!pred bra target', we should use it.
+def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
+ (CBranchOther Int1Regs:$a, bb:$target)>;
+
+// Call
+def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
+def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPSideEffect]>;
+
+def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def calltarget : Operand<i32>;
+let isCall=1 in {
+ def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
+}
+
+def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
+def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
+
+// Pseudo instructions.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : NVPTXInst<outs, ins, asmstr, pattern>;
+
+def Callseq_Start :
+ NVPTXInst<(outs), (ins i32imm:$amt),
+ "\\{ // callseq $amt\n"
+ "\t.reg .b32 temp_param_reg;",
+ [(callseq_start timm:$amt)]>;
+def Callseq_End :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\} // callseq $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
+// trap instruction
+def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
+
+// Call prototype wrapper
+def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def CallPrototype :
+ SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def ProtoIdent : Operand<i32> {
+ let PrintMethod = "printProtoIdent";
+}
+def CALL_PROTOTYPE :
+ NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
+
+
+include "NVPTXIntrinsics.td"
+
+
+//-----------------------------------
+// Notes
+//-----------------------------------
+// BSWAP is currently expanded. The following is a more efficient
+// - for < sm_20, use vector scalar mov, as tesla support native 16-bit register
+// - for sm_20, use pmpt (use vector scalar mov to get the pack and
+// unpack). sm_20 supports native 32-bit register, but not native 16-bit
+// register.
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index 2a402deccbca..40bfe3a449f7 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1459,8 +1459,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
}
if (FI->usesPICBase())
- BuildMI(MBB, MBBI, dl, LoadInst)
- .addReg(PPC::R30)
+ BuildMI(MBB, MBBI, dl, LoadInst, PPC::R30)
.addImm(PBPOffset)
.addReg(RBReg);
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 483e9b171d57..685f24cb502e 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12031,7 +12031,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case PPCISD::LBRX: {
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index c44e371856a5..acb34d5baaa8 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1881,7 +1881,7 @@ void SparcTargetLowering::computeKnownBitsForTargetNode
const SelectionDAG &DAG,
unsigned Depth) const {
KnownBits Known2;
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index fee008b9572a..a30bf34857b5 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -850,12 +850,18 @@ void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
- // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
+ // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
+ // super register in case one of the subregs is undefined.
+ // This handles ADDR128 too.
if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
+ MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
+ .addReg(SrcReg, RegState::Implicit);
copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
+ MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
+ .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
return;
}
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index c1cfc82b4a81..32ab475f1186 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -776,11 +776,6 @@ private:
bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
- /// MS-compatibility:
- /// Obtain an appropriate size qualifier, when facing its absence,
- /// upon AVX512 vector/broadcast memory operand
- unsigned AdjustAVX512Mem(unsigned Size, X86Operand* UnsizedMemOpNext);
-
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[X86::Mode64Bit];
@@ -1206,27 +1201,16 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
Identifier, Info.OpDecl);
}
+
// We either have a direct symbol reference, or an offset from a symbol. The
// parser always puts the symbol on the LHS, so look there for size
// calculation purposes.
+ unsigned FrontendSize = 0;
const MCBinaryExpr *BinOp = dyn_cast<MCBinaryExpr>(Disp);
bool IsSymRef =
isa<MCSymbolRefExpr>(BinOp ? BinOp->getLHS() : Disp);
- if (IsSymRef) {
- if (!Size) {
- Size = Info.Type * 8; // Size is in terms of bits in this context.
- if (Size)
- InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
- /*Len=*/0, Size);
- if (AllowBetterSizeMatch)
- // Handle cases where size qualifier is absent, upon an indirect symbol
- // reference - e.g. "vaddps zmm1, zmm2, [var]"
- // set Size to zero to allow matching mechansim to try and find a better
- // size qualifier than our initial guess, based on available variants of
- // the given instruction
- Size = 0;
- }
- }
+ if (IsSymRef && !Size && Info.Type)
+ FrontendSize = Info.Type * 8; // Size is in terms of bits in this context.
// When parsing inline assembly we set the base register to a non-zero value
// if we don't know the actual value at this time. This is necessary to
@@ -1234,7 +1218,7 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
BaseReg = BaseReg ? BaseReg : 1;
return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg,
IndexReg, Scale, Start, End, Size, Identifier,
- Info.OpDecl);
+ Info.OpDecl, FrontendSize);
}
static void
@@ -2884,23 +2868,6 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
return true;
}
-unsigned X86AsmParser::AdjustAVX512Mem(unsigned Size,
- X86Operand* UnsizedMemOpNext) {
- // Check for the existence of an AVX512 platform
- if (!getSTI().getFeatureBits()[X86::FeatureAVX512])
- return 0;
- // Allow adjusting upon a (x|y|z)mm
- if (Size == 512 || Size == 256 || Size == 128)
- return Size;
- // This is an allegadly broadcasting mem op adjustment,
- // allow some more inquiring to validate it
- if (Size == 64 || Size == 32)
- return UnsizedMemOpNext && UnsizedMemOpNext->isToken() &&
- UnsizedMemOpNext->getToken().substr(0, 4).equals("{1to") ? Size : 0;
- // Do not allow any other type of adjustments
- return 0;
-}
-
bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
@@ -2920,19 +2887,14 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// Find one unsized memory operand, if present.
X86Operand *UnsizedMemOp = nullptr;
- // If unsized memory operand was found - obtain following operand.
- // For use in AdjustAVX512Mem
- X86Operand *UnsizedMemOpNext = nullptr;
for (const auto &Op : Operands) {
X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
- if (UnsizedMemOp) {
- UnsizedMemOpNext = X86Op;
+ if (X86Op->isMemUnsized()) {
+ UnsizedMemOp = X86Op;
// Have we found an unqualified memory operand,
// break. IA allows only one memory operand.
break;
}
- if (X86Op->isMemUnsized())
- UnsizedMemOp = X86Op;
}
// Allow some instructions to have implicitly pointer-sized operands. This is
@@ -2978,7 +2940,6 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// If an unsized memory operand is present, try to match with each memory
// operand size. In Intel assembly, the size is not part of the instruction
// mnemonic.
- unsigned MatchedSize = 0;
if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
for (unsigned Size : MopSizes) {
@@ -2993,17 +2954,10 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
- if (M == Match_Success)
- // MS-compatability:
- // Adjust AVX512 vector/broadcast memory operand,
- // when facing the absence of a size qualifier.
- // Match GCC behavior on respective cases.
- MatchedSize = AdjustAVX512Mem(Size, UnsizedMemOpNext);
}
// Restore the size of the unsized memory operand if we modified it.
- if (UnsizedMemOp)
- UnsizedMemOp->Mem.Size = 0;
+ UnsizedMemOp->Mem.Size = 0;
}
// If we haven't matched anything yet, this is not a basic integer or FPU
@@ -3027,20 +2981,30 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
Op.getLocRange(), MatchingInlineAsm);
}
+ unsigned NumSuccessfulMatches =
+ std::count(std::begin(Match), std::end(Match), Match_Success);
+
+ // If matching was ambiguous and we had size information from the frontend,
+ // try again with that. This handles cases like "movxz eax, m8/m16".
+ if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
+ UnsizedMemOp->getMemFrontendSize()) {
+ UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
+ unsigned M = MatchInstruction(
+ Operands, Inst, ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax());
+ if (M == Match_Success)
+ NumSuccessfulMatches = 1;
+
+ // Add a rewrite that encodes the size information we used from the
+ // frontend.
+ InstInfo->AsmRewrites->emplace_back(
+ AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
+ /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
+ }
+
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
// matches won't have modified it).
- unsigned NumSuccessfulMatches =
- std::count(std::begin(Match), std::end(Match), Match_Success);
if (NumSuccessfulMatches == 1) {
- if (MatchedSize && isParsingInlineAsm() && isParsingIntelSyntax())
- // MS compatibility -
- // Fix the rewrite according to the matched memory size
- // MS inline assembly only
- for (AsmRewrite &AR : *InstInfo->AsmRewrites)
- if ((AR.Loc.getPointer() == UnsizedMemOp->StartLoc.getPointer()) &&
- (AR.Kind == AOK_SizeDirective))
- AR.Val = MatchedSize;
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the individual
// transformations can chain off each other.
@@ -3057,7 +3021,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
"multiple matches only possible with unsized memory operands");
return Error(UnsizedMemOp->getStartLoc(),
"ambiguous operand size for instruction '" + Mnemonic + "\'",
- UnsizedMemOp->getLocRange(), MatchingInlineAsm);
+ UnsizedMemOp->getLocRange());
}
// If one instruction matched with a missing feature, report this as a
diff --git a/lib/Target/X86/AsmParser/X86Operand.h b/lib/Target/X86/AsmParser/X86Operand.h
index 9f1fa6c65907..33eff14b8215 100644
--- a/lib/Target/X86/AsmParser/X86Operand.h
+++ b/lib/Target/X86/AsmParser/X86Operand.h
@@ -62,6 +62,10 @@ struct X86Operand : public MCParsedAsmOperand {
unsigned Scale;
unsigned Size;
unsigned ModeSize;
+
+ /// If the memory operand is unsized and there are multiple instruction
+ /// matches, prefer the one with this size.
+ unsigned FrontendSize;
};
union {
@@ -136,6 +140,10 @@ struct X86Operand : public MCParsedAsmOperand {
assert(Kind == Memory && "Invalid access!");
return Mem.ModeSize;
}
+ unsigned getMemFrontendSize() const {
+ assert(Kind == Memory && "Invalid access!");
+ return Mem.FrontendSize;
+ }
bool isToken() const override {return Kind == Token; }
@@ -512,7 +520,7 @@ struct X86Operand : public MCParsedAsmOperand {
static std::unique_ptr<X86Operand>
CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = nullptr) {
+ void *OpDecl = nullptr, unsigned FrontendSize = 0) {
auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
Res->Mem.SegReg = 0;
Res->Mem.Disp = Disp;
@@ -521,6 +529,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.Scale = 1;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
+ Res->Mem.FrontendSize = FrontendSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
@@ -532,7 +541,7 @@ struct X86Operand : public MCParsedAsmOperand {
CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
- void *OpDecl = nullptr) {
+ void *OpDecl = nullptr, unsigned FrontendSize = 0) {
// We should never just have a displacement, that should be parsed as an
// absolute memory operand.
assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
@@ -548,6 +557,7 @@ struct X86Operand : public MCParsedAsmOperand {
Res->Mem.Scale = Scale;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
+ Res->Mem.FrontendSize = FrontendSize;
Res->SymName = SymName;
Res->OpDecl = OpDecl;
Res->AddressOf = false;
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 44bc373b0394..d7c3b74d3efb 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -91,6 +91,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
X86MCInstLower &MCIL);
void LowerPATCHABLE_RET(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index a94045cd536d..331e56976db7 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -2990,6 +2990,10 @@ unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF)
void X86FrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
+ // Mark the function as not having WinCFI. We will set it back to true in
+ // emitPrologue if it gets called and emits CFI.
+ MF.setHasWinCFI(false);
+
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
const Function *Fn = MF.getFunction();
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 83542aaa013b..9ee2234595f9 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1224,10 +1224,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i1, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i1, Custom);
- setOperationAction(ISD::VSELECT, MVT::v8i1, Expand);
- setOperationAction(ISD::VSELECT, MVT::v16i1, Expand);
+
if (Subtarget.hasDQI()) {
for (auto VT : { MVT::v2i64, MVT::v4i64, MVT::v8i64 }) {
setOperationAction(ISD::SINT_TO_FP, VT, Legal);
@@ -1243,8 +1240,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
if (Subtarget.hasVLX()) {
- setOperationAction(ISD::ABS, MVT::v4i64, Legal);
- setOperationAction(ISD::ABS, MVT::v2i64, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
@@ -1270,8 +1265,6 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setLoadExtAction(ISD::EXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
}
- setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
- setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
@@ -1304,33 +1297,34 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);
- setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
- setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
-
setOperationAction(ISD::MUL, MVT::v8i64, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
- setOperationAction(ISD::SELECT, MVT::v16i1, Custom);
- setOperationAction(ISD::SELECT, MVT::v8i1, Custom);
-
- setOperationAction(ISD::ADD, MVT::v8i1, Custom);
- setOperationAction(ISD::ADD, MVT::v16i1, Custom);
- setOperationAction(ISD::SUB, MVT::v8i1, Custom);
- setOperationAction(ISD::SUB, MVT::v16i1, Custom);
- setOperationAction(ISD::MUL, MVT::v8i1, Custom);
- setOperationAction(ISD::MUL, MVT::v16i1, Custom);
setOperationAction(ISD::MUL, MVT::v16i32, Legal);
+ // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
+ setOperationAction(ISD::ABS, MVT::v4i64, Legal);
+ setOperationAction(ISD::ABS, MVT::v2i64, Legal);
+
+ for (auto VT : { MVT::v8i1, MVT::v16i1 }) {
+ setOperationAction(ISD::ADD, VT, Custom);
+ setOperationAction(ISD::SUB, VT, Custom);
+ setOperationAction(ISD::MUL, VT, Custom);
+ setOperationAction(ISD::SETCC, VT, Custom);
+ setOperationAction(ISD::SELECT, VT, Custom);
+ setOperationAction(ISD::TRUNCATE, VT, Custom);
+
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::VSELECT, VT, Expand);
+ }
+
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
@@ -1352,33 +1346,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationPromotedToType(ISD::XOR, MVT::v16i32, MVT::v8i64);
if (Subtarget.hasCDI()) {
- setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
-
- setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
- setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
- setOperationAction(ISD::CTLZ, MVT::v16i16, Custom);
- setOperationAction(ISD::CTLZ, MVT::v32i8, Custom);
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i32, Custom);
-
- if (Subtarget.hasVLX()) {
- setOperationAction(ISD::CTLZ, MVT::v4i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v8i32, Legal);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Legal);
- setOperationAction(ISD::CTLZ, MVT::v4i32, Legal);
- } else {
- setOperationAction(ISD::CTLZ, MVT::v4i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v8i32, Custom);
- setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
- setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
+ // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
+ for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v16i32, MVT::v2i64,
+ MVT::v4i64, MVT::v8i64}) {
+ setOperationAction(ISD::CTLZ, VT, Legal);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
}
-
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i32, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
- setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
} // Subtarget.hasCDI()
if (Subtarget.hasDQI()) {
@@ -6070,7 +6043,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- if (NumNonZero > 8)
+ if (NumNonZero > 8 && !Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
@@ -6158,7 +6131,7 @@ static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- if (NumNonZero > 4)
+ if (NumNonZero > 4 && !Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
@@ -6241,7 +6214,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
Elt = Op->getOperand(EltIdx);
// By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
- EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
+ EltMaskIdx = Elt.getConstantOperandVal(1);
if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
break;
Mask[EltIdx] = EltIdx;
@@ -6272,8 +6245,7 @@ static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
SDValue SrcVector = Current->getOperand(0);
if (!V1.getNode())
V1 = SrcVector;
- CanFold = SrcVector == V1 &&
- cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
+ CanFold = (SrcVector == V1) && (Current.getConstantOperandVal(1) == i);
}
if (!CanFold)
@@ -20944,54 +20916,62 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
+// Split an unary integer op into 2 half sized ops.
+static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+ unsigned NumElems = VT.getVectorNumElements();
+ unsigned SizeInBits = VT.getSizeInBits();
+
+ // Extract the Lo/Hi vectors
+ SDLoc dl(Op);
+ SDValue Src = Op.getOperand(0);
+ SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
+ SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
+
+ MVT EltVT = VT.getVectorElementType();
+ MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
+ DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
+ DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
+}
+
+// Decompose 256-bit ops into smaller 128-bit ops.
+static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getSimpleValueType().is256BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
+ "Only handle AVX 256-bit vector integer operation");
+ return LowerVectorIntUnary(Op, DAG);
+}
+
+// Decompose 512-bit ops into smaller 256-bit ops.
+static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
+ assert(Op.getSimpleValueType().is512BitVector() &&
+ Op.getSimpleValueType().isInteger() &&
+ "Only handle AVX 512-bit vector integer operation");
+ return LowerVectorIntUnary(Op, DAG);
+}
+
/// \brief Lower a vector CTLZ using native supported vector CTLZ instruction.
//
-// 1. i32/i64 128/256-bit vector (native support require VLX) are expended
-// to 512-bit vector.
-// 2. i8/i16 vector implemented using dword LZCNT vector instruction
-// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
-// split the vector, perform operation on it's Lo a Hi part and
-// concatenate the results.
-static SDValue LowerVectorCTLZ_AVX512(SDValue Op, SelectionDAG &DAG) {
+// i8/i16 vector implemented using dword LZCNT vector instruction
+// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
+// split the vector, perform operation on it's Lo a Hi part and
+// concatenate the results.
+static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = VT.getVectorNumElements();
- if (EltVT == MVT::i64 || EltVT == MVT::i32) {
- // Extend to 512 bit vector.
- assert((VT.is256BitVector() || VT.is128BitVector()) &&
- "Unsupported value type for operation");
-
- MVT NewVT = MVT::getVectorVT(EltVT, 512 / VT.getScalarSizeInBits());
- SDValue Vec512 = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
- DAG.getUNDEF(NewVT),
- Op.getOperand(0),
- DAG.getIntPtrConstant(0, dl));
- SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Vec512);
-
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, CtlzNode,
- DAG.getIntPtrConstant(0, dl));
- }
-
assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
"Unsupported element type");
- if (16 < NumElems) {
- // Split vector, it's Lo and Hi parts will be handled in next iteration.
- SDValue Lo, Hi;
- std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
- MVT OutVT = MVT::getVectorVT(EltVT, NumElems/2);
-
- Lo = DAG.getNode(ISD::CTLZ, dl, OutVT, Lo);
- Hi = DAG.getNode(ISD::CTLZ, dl, OutVT, Hi);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
- }
+ // Split vector, it's Lo and Hi parts will be handled in next iteration.
+ if (16 < NumElems)
+ return LowerVectorIntUnary(Op, DAG);
MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
-
assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
"Unsupported value type for operation");
@@ -21078,23 +21058,17 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
- SDValue Op0 = Op.getOperand(0);
- if (Subtarget.hasAVX512())
- return LowerVectorCTLZ_AVX512(Op, DAG);
+ if (Subtarget.hasCDI())
+ return LowerVectorCTLZ_AVX512CDI(Op, DAG);
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- unsigned NumElems = VT.getVectorNumElements();
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
- // Extract each 128-bit vector, perform ctlz and concat the result.
- SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- DAG.getNode(ISD::CTLZ, DL, LHS.getValueType(), LHS),
- DAG.getNode(ISD::CTLZ, DL, RHS.getValueType(), RHS));
- }
+ // Decompose 512-bit ops into smaller 256-bit ops.
+ if (VT.is512BitVector() && !Subtarget.hasBWI())
+ return Lower512IntUnary(Op, DAG);
assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
@@ -21258,19 +21232,7 @@ static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
- MVT VT = Op.getSimpleValueType();
- unsigned NumElems = VT.getVectorNumElements();
-
- SDLoc dl(Op);
- SDValue Src = Op.getOperand(0);
- SDValue Lo = extract128BitVector(Src, 0, DAG, dl);
- SDValue Hi = extract128BitVector(Src, NumElems / 2, DAG, dl);
-
- MVT EltVT = VT.getVectorElementType();
- MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- DAG.getNode(ISD::ABS, dl, NewVT, Lo),
- DAG.getNode(ISD::ABS, dl, NewVT, Hi));
+ return Lower256IntUnary(Op, DAG);
}
static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
@@ -23049,29 +23011,13 @@ static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
}
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extract each 128-bit vector, compute pop count and concat the result.
- SDValue LHS = extract128BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract128BitVector(Op0, NumElems / 2, DAG, DL);
-
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
- LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
- }
-
- if (VT.is512BitVector() && !Subtarget.hasBWI()) {
- unsigned NumElems = VT.getVectorNumElements();
-
- // Extract each 256-bit vector, compute pop count and concat the result.
- SDValue LHS = extract256BitVector(Op0, 0, DAG, DL);
- SDValue RHS = extract256BitVector(Op0, NumElems / 2, DAG, DL);
+ // Decompose 256-bit ops into smaller 128-bit ops.
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- LowerVectorCTPOPInRegLUT(LHS, DL, Subtarget, DAG),
- LowerVectorCTPOPInRegLUT(RHS, DL, Subtarget, DAG));
- }
+ // Decompose 512-bit ops into smaller 256-bit ops.
+ if (VT.is512BitVector() && !Subtarget.hasBWI())
+ return Lower512IntUnary(Op, DAG);
return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}
@@ -23098,20 +23044,12 @@ static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
DAG.getIntPtrConstant(0, DL));
}
- MVT SVT = VT.getVectorElementType();
int NumElts = VT.getVectorNumElements();
int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
// Decompose 256-bit ops into smaller 128-bit ops.
- if (VT.is256BitVector()) {
- SDValue Lo = extract128BitVector(In, 0, DAG, DL);
- SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
-
- MVT HalfVT = MVT::getVectorVT(SVT, NumElts / 2);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
- DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo),
- DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi));
- }
+ if (VT.is256BitVector())
+ return Lower256IntUnary(Op, DAG);
assert(VT.is128BitVector() &&
"Only 128-bit vector bitreverse lowering supported.");
@@ -23152,14 +23090,8 @@ static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
"Only byte vector BITREVERSE supported");
// Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
- if (VT.is256BitVector() && !Subtarget.hasInt256()) {
- MVT HalfVT = MVT::getVectorVT(MVT::i8, NumElts / 2);
- SDValue Lo = extract128BitVector(In, 0, DAG, DL);
- SDValue Hi = extract128BitVector(In, NumElts / 2, DAG, DL);
- Lo = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Lo);
- Hi = DAG.getNode(ISD::BITREVERSE, DL, HalfVT, Hi);
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
- }
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return Lower256IntUnary(Op, DAG);
// Perform BITREVERSE using PSHUFB lookups. Each byte is split into
// two nibbles and a PSHUFB lookup to find the bitreverse of each
@@ -26585,6 +26517,10 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
+
+ case TargetOpcode::PATCHABLE_EVENT_CALL:
+ // Do nothing here, handle in xray instrumentation pass.
+ return BB;
case X86::LCMPXCHG8B: {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
@@ -26667,7 +26603,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Opc) {
default: break;
case X86ISD::ADD:
@@ -26697,7 +26633,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
- Known.Zero.setAllBits();
+ Known.setAllZero();
break;
}
@@ -26729,8 +26665,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
Known = KnownBits(InBitWidth);
APInt DemandedSrcElts = APInt::getLowBitsSet(InNumElts, NumElts);
DAG.computeKnownBits(N0, Known, DemandedSrcElts, Depth + 1);
- Known.One = Known.One.zext(BitWidth);
- Known.Zero = Known.Zero.zext(BitWidth);
+ Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBitWidth);
break;
}
@@ -31671,10 +31606,9 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
if (auto *AmtConst = AmtBV->getConstantSplatNode())
SraAmt = AmtConst->getZExtValue();
- } else if (Mask.getOpcode() == X86ISD::VSRAI) {
- SDValue SraC = Mask.getOperand(1);
- SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
- }
+ } else if (Mask.getOpcode() == X86ISD::VSRAI)
+ SraAmt = Mask.getConstantOperandVal(1);
+
if ((SraAmt + 1) != EltBits)
return SDValue();
@@ -31708,7 +31642,9 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
V = Y;
if (V) {
- assert(EltBits == 8 || EltBits == 16 || EltBits == 32);
+ if (EltBits != 8 && EltBits != 16 && EltBits != 32)
+ return SDValue();
+
SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
SDValue SubOp2 = Mask;
@@ -34488,8 +34424,7 @@ static SDValue combineX86ADD(SDNode *N, SelectionDAG &DAG,
if (Carry.getOpcode() == ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC_CARRY) {
- auto *Cond = cast<ConstantSDNode>(Carry.getOperand(0));
- if (Cond->getZExtValue() == X86::COND_B)
+ if (Carry.getConstantOperandVal(0) == X86::COND_B)
return DCI.CombineTo(N, SDValue(N, 0), Carry.getOperand(1));
}
}
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index c38c13bb9757..71d395244b4a 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -8631,6 +8631,20 @@ multiclass avx512_unary_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
defm VPABS : avx512_unary_rm_vl_all<0x1C, 0x1D, 0x1E, 0x1F, "vpabs", abs>;
+// VPABS: Use 512bit version to implement 128/256 bit in case NoVLX.
+let Predicates = [HasAVX512, NoVLX] in {
+ def : Pat<(v4i64 (abs VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPABSQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v2i64 (abs VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPABSQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+}
+
multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
defm NAME : avx512_unary_rm_vl_dq<opc, opc, OpcodeStr, ctlz, prd>;
@@ -8639,6 +8653,31 @@ multiclass avx512_ctlz<bits<8> opc, string OpcodeStr, Predicate prd>{
defm VPLZCNT : avx512_ctlz<0x44, "vplzcnt", HasCDI>;
defm VPCONFLICT : avx512_unary_rm_vl_dq<0xC4, 0xC4, "vpconflict", X86Conflict, HasCDI>;
+// VPLZCNT: Use 512bit version to implement 128/256 bit in case NoVLX.
+let Predicates = [HasCDI, NoVLX] in {
+ def : Pat<(v4i64 (ctlz VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v2i64 (ctlz VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTQZrr
+ (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+
+ def : Pat<(v8i32 (ctlz VR256X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTDZrr
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)),
+ sub_ymm)>;
+ def : Pat<(v4i32 (ctlz VR128X:$src)),
+ (EXTRACT_SUBREG
+ (VPLZCNTDZrr
+ (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm)),
+ sub_xmm)>;
+}
+
//===---------------------------------------------------------------------===//
// Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index cdf7ce19cdc8..902b0c2c04e3 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -1995,11 +1995,11 @@ def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>,
Requires<[In64BitMode]>;
// Data16 instruction prefix
-def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>,
+def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>,
Requires<[Not16BitMode]>;
// Data instruction prefix
-def DATA32_PREFIX : I<0x66, RawFrm, (outs), (ins), "data32", []>,
+def DATA32_PREFIX : I<0x66, RawFrm, (outs), (ins), "data32", []>,
Requires<[In16BitMode]>;
// Repeat string operation instruction prefixes
@@ -2518,7 +2518,7 @@ let SchedRW = [ WriteSystem ] in {
}
let Uses = [ ECX, EAX, EBX ] in {
- def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
+ def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
[(int_x86_mwaitx ECX, EAX, EBX)], IIC_SSE_MWAITX>,
TB, Requires<[ HasMWAITX ]>;
}
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index f22a50200c9a..48da2fa607af 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -6718,22 +6718,23 @@ let Constraints = "$src1 = $dst" in {
SSE_INTMUL_ITINS_P, 1>;
}
-let Predicates = [HasAVX, NoVLX] in {
+let Predicates = [HasAVX, NoVLX] in
defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
loadv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
VEX_4V, VEX_WIG;
+let Predicates = [HasAVX] in
defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_WIG;
-}
-let Predicates = [HasAVX2] in {
+
+let Predicates = [HasAVX2, NoVLX] in
defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
VEX_4V, VEX_L, VEX_WIG;
+let Predicates = [HasAVX2] in
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
VEX_4V, VEX_L, VEX_WIG;
-}
let Constraints = "$src1 = $dst" in {
defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
diff --git a/lib/Target/X86/X86InstructionSelector.cpp b/lib/Target/X86/X86InstructionSelector.cpp
index 38f7bc0af5c7..d65eb1de8d09 100644
--- a/lib/Target/X86/X86InstructionSelector.cpp
+++ b/lib/Target/X86/X86InstructionSelector.cpp
@@ -65,8 +65,8 @@ private:
MachineFunction &MF) const;
bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
- bool selectFrameIndex(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
+ bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -235,7 +235,7 @@ bool X86InstructionSelector::select(MachineInstr &I) const {
return true;
if (selectLoadStoreOp(I, MRI, MF))
return true;
- if (selectFrameIndex(I, MRI, MF))
+ if (selectFrameIndexOrGep(I, MRI, MF))
return true;
if (selectConstant(I, MRI, MF))
return true;
@@ -427,27 +427,37 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
-bool X86InstructionSelector::selectFrameIndex(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_FRAME_INDEX)
+bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ unsigned Opc = I.getOpcode();
+
+ if (Opc != TargetOpcode::G_FRAME_INDEX && Opc != TargetOpcode::G_GEP)
return false;
const unsigned DefReg = I.getOperand(0).getReg();
LLT Ty = MRI.getType(DefReg);
- // Use LEA to calculate frame index.
+ // Use LEA to calculate frame index and GEP
unsigned NewOpc;
if (Ty == LLT::pointer(0, 64))
NewOpc = X86::LEA64r;
else if (Ty == LLT::pointer(0, 32))
NewOpc = STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
else
- llvm_unreachable("Can't select G_FRAME_INDEX, unsupported type.");
+ llvm_unreachable("Can't select G_FRAME_INDEX/G_GEP, unsupported type.");
I.setDesc(TII.get(NewOpc));
MachineInstrBuilder MIB(MF, I);
- addOffset(MIB, 0);
+
+ if (Opc == TargetOpcode::G_FRAME_INDEX) {
+ addOffset(MIB, 0);
+ } else {
+ MachineOperand &InxOp = I.getOperand(2);
+ I.addOperand(InxOp); // set IndexReg
+ InxOp.ChangeToImmediate(1); // set Scale
+ MIB.addImm(0).addReg(0);
+ }
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
diff --git a/lib/Target/X86/X86LegalizerInfo.cpp b/lib/Target/X86/X86LegalizerInfo.cpp
index a437f6bf4714..4f5e70414aa9 100644
--- a/lib/Target/X86/X86LegalizerInfo.cpp
+++ b/lib/Target/X86/X86LegalizerInfo.cpp
@@ -34,6 +34,11 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
setLegalizerInfo64bit();
setLegalizerInfoSSE1();
setLegalizerInfoSSE2();
+ setLegalizerInfoSSE41();
+ setLegalizerInfoAVX2();
+ setLegalizerInfoAVX512();
+ setLegalizerInfoAVX512DQ();
+ setLegalizerInfoAVX512BW();
computeTables();
}
@@ -50,7 +55,7 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
- for (unsigned BinOp : {G_ADD, G_SUB})
+ for (unsigned BinOp : {G_ADD, G_SUB, G_MUL})
for (auto Ty : {s8, s16, s32})
setAction({BinOp, Ty}, Legal);
@@ -65,6 +70,12 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GEP, p0}, Legal);
+ setAction({G_GEP, 1, s32}, Legal);
+
+ for (auto Ty : {s1, s8, s16})
+ setAction({G_GEP, 1, Ty}, WidenScalar);
+
// Constants
for (auto Ty : {s8, s16, s32, p0})
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
@@ -94,7 +105,7 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
- for (unsigned BinOp : {G_ADD, G_SUB})
+ for (unsigned BinOp : {G_ADD, G_SUB, G_MUL})
for (auto Ty : {s8, s16, s32, s64})
setAction({BinOp, Ty}, Legal);
@@ -109,6 +120,13 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
// Pointer-handling
setAction({G_FRAME_INDEX, p0}, Legal);
+ setAction({G_GEP, p0}, Legal);
+ setAction({G_GEP, 1, s32}, Legal);
+ setAction({G_GEP, 1, s64}, Legal);
+
+ for (auto Ty : {s1, s8, s16})
+ setAction({G_GEP, 1, Ty}, WidenScalar);
+
// Constants
for (auto Ty : {s8, s16, s32, s64, p0})
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);
@@ -149,6 +167,7 @@ void X86LegalizerInfo::setLegalizerInfoSSE2() {
return;
const LLT s64 = LLT::scalar(64);
+ const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
@@ -159,4 +178,83 @@ void X86LegalizerInfo::setLegalizerInfoSSE2() {
for (unsigned BinOp : {G_ADD, G_SUB})
for (auto Ty : {v4s32})
setAction({BinOp, Ty}, Legal);
+
+ setAction({G_MUL, v8s16}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoSSE41() {
+ if (!Subtarget.hasSSE41())
+ return;
+
+ const LLT v4s32 = LLT::vector(4, 32);
+
+ setAction({G_MUL, v4s32}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX2() {
+ if (!Subtarget.hasAVX2())
+ return;
+
+ const LLT v16s16 = LLT::vector(16, 16);
+ const LLT v8s32 = LLT::vector(8, 32);
+
+ for (auto Ty : {v16s16, v8s32})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512() {
+ if (!Subtarget.hasAVX512())
+ return;
+
+ const LLT v16s32 = LLT::vector(16, 32);
+
+ setAction({G_MUL, v16s32}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v4s32 = LLT::vector(4, 32);
+ const LLT v8s32 = LLT::vector(8, 32);
+
+ for (auto Ty : {v4s32, v8s32})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
+ if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
+ return;
+
+ const LLT v8s64 = LLT::vector(8, 64);
+
+ setAction({G_MUL, v8s64}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v2s64 = LLT::vector(2, 64);
+ const LLT v4s64 = LLT::vector(4, 64);
+
+ for (auto Ty : {v2s64, v4s64})
+ setAction({G_MUL, Ty}, Legal);
+}
+
+void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
+ if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
+ return;
+
+ const LLT v32s16 = LLT::vector(32, 16);
+
+ setAction({G_MUL, v32s16}, Legal);
+
+ /************ VLX *******************/
+ if (!Subtarget.hasVLX())
+ return;
+
+ const LLT v8s16 = LLT::vector(8, 16);
+ const LLT v16s16 = LLT::vector(16, 16);
+
+ for (auto Ty : {v8s16, v16s16})
+ setAction({G_MUL, Ty}, Legal);
}
diff --git a/lib/Target/X86/X86LegalizerInfo.h b/lib/Target/X86/X86LegalizerInfo.h
index 3f00898b4232..ab5405a70427 100644
--- a/lib/Target/X86/X86LegalizerInfo.h
+++ b/lib/Target/X86/X86LegalizerInfo.h
@@ -38,6 +38,11 @@ private:
void setLegalizerInfo64bit();
void setLegalizerInfoSSE1();
void setLegalizerInfoSSE2();
+ void setLegalizerInfoSSE41();
+ void setLegalizerInfoAVX2();
+ void setLegalizerInfoAVX512();
+ void setLegalizerInfoAVX512DQ();
+ void setLegalizerInfoAVX512BW();
};
} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index 550e3543a71e..598d88d8b9c3 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -1040,6 +1040,83 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
getSubtargetInfo());
}
+void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ assert(Subtarget->is64Bit() && "XRay custom events only suports X86-64");
+
+ // We want to emit the following pattern, which follows the x86 calling
+ // convention to prepare for the trampoline call to be patched in.
+ //
+ // <args placement according SysV64 calling convention>
+ // .p2align 1, ...
+ // .Lxray_event_sled_N:
+ // jmp +N // jump across the call instruction
+ // callq __xray_CustomEvent // force relocation to symbol
+ // <args cleanup, jump to here>
+ //
+ // The relative jump needs to jump forward 24 bytes:
+ // 10 (args) + 5 (nops) + 9 (cleanup)
+ //
+ // After patching, it would look something like:
+ //
+ // nopw (2-byte nop)
+ // callq __xrayCustomEvent // already lowered
+ //
+ // ---
+ // First we emit the label and the jump.
+ auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
+ OutStreamer->AddComment("# XRay Custom Event Log");
+ OutStreamer->EmitCodeAlignment(2);
+ OutStreamer->EmitLabel(CurSled);
+
+ // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
+ // an operand (computed as an offset from the jmp instruction).
+ // FIXME: Find another less hacky way do force the relative jump.
+ OutStreamer->EmitBytes("\xeb\x14");
+
+ // The default C calling convention will place two arguments into %rcx and
+ // %rdx -- so we only work with those.
+ unsigned UsedRegs[] = {X86::RDI, X86::RSI, X86::RAX};
+
+ // Because we will use %rax, we preserve that across the call.
+ EmitAndCountInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RAX));
+
+ // Then we put the operands in the %rdi and %rsi registers.
+ for (unsigned I = 0; I < MI.getNumOperands(); ++I)
+ if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
+ if (Op->isImm())
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(UsedRegs[I])
+ .addImm(Op->getImm()));
+ else if (Op->isReg()) {
+ if (Op->getReg() != UsedRegs[I])
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr)
+ .addReg(UsedRegs[I])
+ .addReg(Op->getReg()));
+ else
+ EmitNops(*OutStreamer, 3, Subtarget->is64Bit(), getSubtargetInfo());
+ }
+ }
+
+ // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
+ // name of the trampoline to be implemented by the XRay runtime. We put this
+ // explicitly in the %rax register.
+ auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
+ MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(X86::RAX)
+ .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
+
+ // Emit the call instruction.
+ EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(X86::RAX));
+
+ // Restore caller-saved and used registers.
+ OutStreamer->AddComment("xray custom event end.");
+ EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RAX));
+
+ recordSled(CurSled, MI, SledKind::CUSTOM_EVENT);
+}
+
void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
X86MCInstLower &MCIL) {
// We want to emit the following pattern:
@@ -1415,6 +1492,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::PATCHABLE_TAIL_CALL:
return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
+
+ case TargetOpcode::PATCHABLE_EVENT_CALL:
+ return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
diff --git a/lib/Target/X86/X86OptimizeLEAs.cpp b/lib/Target/X86/X86OptimizeLEAs.cpp
index 7be0a7fd4067..aabbf67a16b6 100644
--- a/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -223,8 +223,6 @@ public:
StringRef getPassName() const override { return "X86 LEA Optimize"; }
- bool doInitialization(Module &M) override;
-
/// \brief Loop over all of the basic blocks, replacing address
/// calculations in load and store instructions, if it's already
/// been calculated by LEA. Also, remove redundant LEAs.
@@ -280,7 +278,6 @@ private:
MachineRegisterInfo *MRI;
const X86InstrInfo *TII;
const X86RegisterInfo *TRI;
- Module *TheModule;
static char ID;
};
@@ -649,11 +646,6 @@ bool OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
return Changed;
}
-bool OptimizeLEAPass::doInitialization(Module &M) {
- TheModule = &M;
- return false;
-}
-
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
diff --git a/lib/Target/X86/X86RegisterBankInfo.cpp b/lib/Target/X86/X86RegisterBankInfo.cpp
index 0f8a750a0235..efd3df26dd42 100644
--- a/lib/Target/X86/X86RegisterBankInfo.cpp
+++ b/lib/Target/X86/X86RegisterBankInfo.cpp
@@ -139,8 +139,9 @@ bool X86RegisterBankInfo::getInstrValueMapping(
return true;
}
-RegisterBankInfo::InstructionMapping
-X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI, bool isFP) {
+const RegisterBankInfo::InstructionMapping &
+X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
+ bool isFP) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -152,10 +153,10 @@ X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI, bool isFP) {
llvm_unreachable("Unsupported operand mapping yet.");
auto Mapping = getValueMapping(getPartialMappingIdx(Ty, isFP), 3);
- return InstructionMapping{DefaultMappingID, 1, Mapping, NumOperands};
+ return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
}
-RegisterBankInfo::InstructionMapping
+const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -164,7 +165,7 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Try the default logic for non-generic instructions that are either copies
// or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc)) {
- InstructionMapping Mapping = getInstrMappingImpl(MI);
+ const InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
}
@@ -193,10 +194,10 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
// Finally construct the computed mapping.
SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
- return InstructionMapping();
+ return getInvalidInstructionMapping();
- return InstructionMapping{DefaultMappingID, /* Cost */ 1,
- getOperandsMapping(OpdsMapping), NumOperands};
+ return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
+ getOperandsMapping(OpdsMapping), NumOperands);
}
void X86RegisterBankInfo::applyMappingImpl(
@@ -231,10 +232,10 @@ X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
break;
- RegisterBankInfo::InstructionMapping Mapping = InstructionMapping{
- /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands};
+ const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
+ /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
InstructionMappings AltMappings;
- AltMappings.emplace_back(std::move(Mapping));
+ AltMappings.push_back(&Mapping);
return AltMappings;
}
default:
diff --git a/lib/Target/X86/X86RegisterBankInfo.h b/lib/Target/X86/X86RegisterBankInfo.h
index a1e01a9ab949..e227880427f3 100644
--- a/lib/Target/X86/X86RegisterBankInfo.h
+++ b/lib/Target/X86/X86RegisterBankInfo.h
@@ -46,8 +46,8 @@ private:
/// Get an instruction mapping.
/// \return An InstructionMappings with a statically allocated
/// OperandsMapping.
- static InstructionMapping getSameOperandsMapping(const MachineInstr &MI,
- bool isFP);
+ const InstructionMapping &getSameOperandsMapping(const MachineInstr &MI,
+ bool isFP) const;
/// Track the bank of each instruction operand(register)
static void
@@ -74,7 +74,8 @@ public:
/// See RegisterBankInfo::applyMapping.
void applyMappingImpl(const OperandsMapper &OpdMapper) const override;
- InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
+ const InstructionMapping &
+ getInstrMapping(const MachineInstr &MI) const override;
};
} // namespace llvm
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 9ab751e2b002..d66d39dcee17 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -139,12 +139,18 @@ X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
return X86II::MO_NO_FLAG;
assert(!isTargetCOFF());
+ const Function *F = dyn_cast_or_null<Function>(GV);
- if (isTargetELF())
+ if (isTargetELF()) {
+ if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
+ // According to psABI, PLT stub clobbers XMM8-XMM15.
+ // In Regcall calling convention those registers are used for passing
+ // parameters. Thus we need to prevent lazy binding in Regcall.
+ return X86II::MO_GOTPCREL;
return X86II::MO_PLT;
+ }
if (is64Bit()) {
- auto *F = dyn_cast_or_null<Function>(GV);
if (F && F->hasFnAttribute(Attribute::NonLazyBind))
// If the function is marked as non-lazy, generate an indirect call
// which loads from the GOT directly. This avoids runtime overhead
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index b742fb472372..f3b619a2956a 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1426,25 +1426,25 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
{ ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
};
static const CostTblEntry AVX1CostTbl[] = {
- { ISD::BITREVERSE, MVT::v4i64, 10 },
- { ISD::BITREVERSE, MVT::v8i32, 10 },
- { ISD::BITREVERSE, MVT::v16i16, 10 },
- { ISD::BITREVERSE, MVT::v32i8, 10 },
+ { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
+ { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
{ ISD::BSWAP, MVT::v4i64, 4 },
{ ISD::BSWAP, MVT::v8i32, 4 },
{ ISD::BSWAP, MVT::v16i16, 4 },
- { ISD::CTLZ, MVT::v4i64, 46 },
- { ISD::CTLZ, MVT::v8i32, 36 },
- { ISD::CTLZ, MVT::v16i16, 28 },
- { ISD::CTLZ, MVT::v32i8, 18 },
- { ISD::CTPOP, MVT::v4i64, 14 },
- { ISD::CTPOP, MVT::v8i32, 22 },
- { ISD::CTPOP, MVT::v16i16, 18 },
- { ISD::CTPOP, MVT::v32i8, 12 },
- { ISD::CTTZ, MVT::v4i64, 20 },
- { ISD::CTTZ, MVT::v8i32, 28 },
- { ISD::CTTZ, MVT::v16i16, 24 },
- { ISD::CTTZ, MVT::v32i8, 18 },
+ { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
+ { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
{ ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
{ ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
{ ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 4d3ecf25dc34..b8742683a0c8 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1825,7 +1825,7 @@ void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case XCoreISD::LADD:
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 6408cad08d55..d8cf8d3f5da2 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -247,7 +247,7 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
if (!ArgIndex.second.empty()) {
Ops.reserve(ArgIndex.second.size());
Type *ElTy = V->getType();
- for (unsigned long II : ArgIndex.second) {
+ for (auto II : ArgIndex.second) {
// Use i32 to index structs, and i64 for others (pointers/arrays).
// This satisfies GEP constraints.
Type *IdxTy =
diff --git a/lib/Transforms/IPO/FunctionImport.cpp b/lib/Transforms/IPO/FunctionImport.cpp
index c7ef2494e3b8..7ed07d63c627 100644
--- a/lib/Transforms/IPO/FunctionImport.cpp
+++ b/lib/Transforms/IPO/FunctionImport.cpp
@@ -117,7 +117,7 @@ namespace {
/// - [insert you fancy metric here]
static const GlobalValueSummary *
selectCallee(const ModuleSummaryIndex &Index,
- const GlobalValueSummaryList &CalleeSummaryList,
+ ArrayRef<std::unique_ptr<GlobalValueSummary>> CalleeSummaryList,
unsigned Threshold, StringRef CallerModulePath) {
auto It = llvm::find_if(
CalleeSummaryList,
@@ -168,19 +168,6 @@ selectCallee(const ModuleSummaryIndex &Index,
return cast<GlobalValueSummary>(It->get());
}
-/// Return the summary for the function \p GUID that fits the \p Threshold, or
-/// null if there's no match.
-static const GlobalValueSummary *selectCallee(GlobalValue::GUID GUID,
- unsigned Threshold,
- const ModuleSummaryIndex &Index,
- StringRef CallerModulePath) {
- auto CalleeSummaryList = Index.findGlobalValueSummaryList(GUID);
- if (CalleeSummaryList == Index.end())
- return nullptr; // This function does not have a summary
- return selectCallee(Index, CalleeSummaryList->second, Threshold,
- CallerModulePath);
-}
-
using EdgeInfo = std::tuple<const FunctionSummary *, unsigned /* Threshold */,
GlobalValue::GUID>;
@@ -194,19 +181,23 @@ static void computeImportForFunction(
FunctionImporter::ImportMapTy &ImportList,
StringMap<FunctionImporter::ExportSetTy> *ExportLists = nullptr) {
for (auto &Edge : Summary.calls()) {
- auto GUID = Edge.first.getGUID();
- DEBUG(dbgs() << " edge -> " << GUID << " Threshold:" << Threshold << "\n");
+ ValueInfo VI = Edge.first;
+ DEBUG(dbgs() << " edge -> " << VI.getGUID() << " Threshold:" << Threshold
+ << "\n");
- if (Index.findGlobalValueSummaryList(GUID) == Index.end()) {
+ if (VI.getSummaryList().empty()) {
// For SamplePGO, the indirect call targets for local functions will
// have its original name annotated in profile. We try to find the
// corresponding PGOFuncName as the GUID.
- GUID = Index.getGUIDFromOriginalID(GUID);
+ auto GUID = Index.getGUIDFromOriginalID(VI.getGUID());
if (GUID == 0)
continue;
+ VI = Index.getValueInfo(GUID);
+ if (!VI)
+ continue;
}
- if (DefinedGVSummaries.count(GUID)) {
+ if (DefinedGVSummaries.count(VI.getGUID())) {
DEBUG(dbgs() << "ignored! Target already in destination module.\n");
continue;
}
@@ -222,8 +213,8 @@ static void computeImportForFunction(
const auto NewThreshold =
Threshold * GetBonusMultiplier(Edge.second.Hotness);
- auto *CalleeSummary =
- selectCallee(GUID, NewThreshold, Index, Summary.modulePath());
+ auto *CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold,
+ Summary.modulePath());
if (!CalleeSummary) {
DEBUG(dbgs() << "ignored! No qualifying callee with summary found.\n");
continue;
@@ -255,7 +246,7 @@ static void computeImportForFunction(
const auto AdjThreshold = GetAdjustedThreshold(Threshold, IsHotCallsite);
auto ExportModulePath = ResolvedCalleeSummary->modulePath();
- auto &ProcessedThreshold = ImportList[ExportModulePath][GUID];
+ auto &ProcessedThreshold = ImportList[ExportModulePath][VI.getGUID()];
/// Since the traversal of the call graph is DFS, we can revisit a function
/// a second time with a higher threshold. In this case, it is added back to
/// the worklist with the new threshold.
@@ -271,7 +262,7 @@ static void computeImportForFunction(
// Make exports in the source module.
if (ExportLists) {
auto &ExportList = (*ExportLists)[ExportModulePath];
- ExportList.insert(GUID);
+ ExportList.insert(VI.getGUID());
if (!PreviouslyImported) {
// This is the first time this function was exported from its source
// module, so mark all functions and globals it references as exported
@@ -291,7 +282,7 @@ static void computeImportForFunction(
}
// Insert the newly imported function to the worklist.
- Worklist.emplace_back(ResolvedCalleeSummary, AdjThreshold, GUID);
+ Worklist.emplace_back(ResolvedCalleeSummary, AdjThreshold, VI.getGUID());
}
}
@@ -431,57 +422,56 @@ DenseSet<GlobalValue::GUID> llvm::computeDeadSymbols(
if (GUIDPreservedSymbols.empty())
// Don't do anything when nothing is live, this is friendly with tests.
return DenseSet<GlobalValue::GUID>();
- DenseSet<GlobalValue::GUID> LiveSymbols = GUIDPreservedSymbols;
- SmallVector<GlobalValue::GUID, 128> Worklist;
- Worklist.reserve(LiveSymbols.size() * 2);
- for (auto GUID : LiveSymbols) {
- DEBUG(dbgs() << "Live root: " << GUID << "\n");
- Worklist.push_back(GUID);
+ DenseSet<ValueInfo> LiveSymbols;
+ SmallVector<ValueInfo, 128> Worklist;
+ Worklist.reserve(GUIDPreservedSymbols.size() * 2);
+ for (auto GUID : GUIDPreservedSymbols) {
+ ValueInfo VI = Index.getValueInfo(GUID);
+ if (!VI)
+ continue;
+ DEBUG(dbgs() << "Live root: " << VI.getGUID() << "\n");
+ LiveSymbols.insert(VI);
+ Worklist.push_back(VI);
}
// Add values flagged in the index as live roots to the worklist.
for (const auto &Entry : Index) {
bool IsLiveRoot = llvm::any_of(
- Entry.second,
+ Entry.second.SummaryList,
[&](const std::unique_ptr<llvm::GlobalValueSummary> &Summary) {
return Summary->liveRoot();
});
if (!IsLiveRoot)
continue;
DEBUG(dbgs() << "Live root (summary): " << Entry.first << "\n");
- Worklist.push_back(Entry.first);
+ Worklist.push_back(ValueInfo(&Entry));
}
while (!Worklist.empty()) {
- auto GUID = Worklist.pop_back_val();
- auto It = Index.findGlobalValueSummaryList(GUID);
- if (It == Index.end()) {
- DEBUG(dbgs() << "Not in index: " << GUID << "\n");
- continue;
- }
+ auto VI = Worklist.pop_back_val();
// FIXME: we should only make the prevailing copy live here
- for (auto &Summary : It->second) {
+ for (auto &Summary : VI.getSummaryList()) {
for (auto Ref : Summary->refs()) {
- auto RefGUID = Ref.getGUID();
- if (LiveSymbols.insert(RefGUID).second) {
- DEBUG(dbgs() << "Marking live (ref): " << RefGUID << "\n");
- Worklist.push_back(RefGUID);
+ if (LiveSymbols.insert(Ref).second) {
+ DEBUG(dbgs() << "Marking live (ref): " << Ref.getGUID() << "\n");
+ Worklist.push_back(Ref);
}
}
if (auto *FS = dyn_cast<FunctionSummary>(Summary.get())) {
for (auto Call : FS->calls()) {
- auto CallGUID = Call.first.getGUID();
- if (LiveSymbols.insert(CallGUID).second) {
- DEBUG(dbgs() << "Marking live (call): " << CallGUID << "\n");
- Worklist.push_back(CallGUID);
+ if (LiveSymbols.insert(Call.first).second) {
+ DEBUG(dbgs() << "Marking live (call): " << Call.first.getGUID()
+ << "\n");
+ Worklist.push_back(Call.first);
}
}
}
if (auto *AS = dyn_cast<AliasSummary>(Summary.get())) {
auto AliaseeGUID = AS->getAliasee().getOriginalName();
- if (LiveSymbols.insert(AliaseeGUID).second) {
+ ValueInfo AliaseeVI = Index.getValueInfo(AliaseeGUID);
+ if (AliaseeVI && LiveSymbols.insert(AliaseeVI).second) {
DEBUG(dbgs() << "Marking live (alias): " << AliaseeGUID << "\n");
- Worklist.push_back(AliaseeGUID);
+ Worklist.push_back(AliaseeVI);
}
}
}
@@ -490,10 +480,9 @@ DenseSet<GlobalValue::GUID> llvm::computeDeadSymbols(
DeadSymbols.reserve(
std::min(Index.size(), Index.size() - LiveSymbols.size()));
for (auto &Entry : Index) {
- auto GUID = Entry.first;
- if (!LiveSymbols.count(GUID)) {
- DEBUG(dbgs() << "Marking dead: " << GUID << "\n");
- DeadSymbols.insert(GUID);
+ if (!LiveSymbols.count(ValueInfo(&Entry))) {
+ DEBUG(dbgs() << "Marking dead: " << Entry.first << "\n");
+ DeadSymbols.insert(Entry.first);
}
}
DEBUG(dbgs() << LiveSymbols.size() << " symbols Live, and "
@@ -825,7 +814,7 @@ static bool doImportingForModule(Module &M) {
// is only enabled when testing importing via the 'opt' tool, which does
// not do the ThinLink that would normally determine what values to promote.
for (auto &I : *Index) {
- for (auto &S : I.second) {
+ for (auto &S : I.second.SummaryList) {
if (GlobalValue::isLocalLinkage(S->linkage()))
S->setLinkage(GlobalValue::ExternalLinkage);
}
diff --git a/lib/Transforms/IPO/LowerTypeTests.cpp b/lib/Transforms/IPO/LowerTypeTests.cpp
index 785207efbe5c..ca4ee92f971a 100644
--- a/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -1440,7 +1440,7 @@ bool LowerTypeTestsModule::lower() {
}
for (auto &P : *ExportSummary) {
- for (auto &S : P.second) {
+ for (auto &S : P.second.SummaryList) {
auto *FS = dyn_cast<FunctionSummary>(S.get());
if (!FS)
continue;
diff --git a/lib/Transforms/IPO/WholeProgramDevirt.cpp b/lib/Transforms/IPO/WholeProgramDevirt.cpp
index cb7d487b68b0..aae22c5457ba 100644
--- a/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -1322,7 +1322,7 @@ bool DevirtModule::run() {
}
for (auto &P : *ExportSummary) {
- for (auto &S : P.second) {
+ for (auto &S : P.second.SummaryList) {
auto *FS = dyn_cast<FunctionSummary>(S.get());
if (!FS)
continue;
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 4f1f19499768..153a186d5ed4 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -847,29 +847,49 @@ Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}
-// If one of the operands only has one non-zero bit, and if the other
-// operand has a known-zero bit in a more significant place than it (not
-// including the sign bit) the ripple may go up to and fill the zero, but
-// won't change the sign. For example, (X & ~4) + 1.
-static bool checkRippleForAdd(const APInt &Op0KnownZero,
- const APInt &Op1KnownZero) {
- APInt Op1MaybeOne = ~Op1KnownZero;
- // Make sure that one of the operand has at most one bit set to 1.
- if (Op1MaybeOne.countPopulation() != 1)
- return false;
-
- // Find the most significant known 0 other than the sign bit.
- int BitWidth = Op0KnownZero.getBitWidth();
- APInt Op0KnownZeroTemp(Op0KnownZero);
- Op0KnownZeroTemp.clearSignBit();
- int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;
-
- int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
- assert(Op1OnePosition >= 0);
-
- // This also covers the case of no known zero, since in that case
- // Op0ZeroPosition is -1.
- return Op0ZeroPosition >= Op1OnePosition;
+/// \brief Return true if we can prove that adding the two values of the
+/// knownbits will not overflow.
+/// Otherwise return false.
+static bool checkRippleForAdd(const KnownBits &LHSKnown,
+ const KnownBits &RHSKnown) {
+ // Addition of two 2's complement numbers having opposite signs will never
+ // overflow.
+ if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
+ (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
+ return true;
+
+ // If either of the values is known to be non-negative, adding them can only
+ // overflow if the second is also non-negative, so we can assume that.
+ // Two non-negative numbers will only overflow if there is a carry to the
+ // sign bit, so we can check if even when the values are as big as possible
+ // there is no overflow to the sign bit.
+ if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
+ APInt MaxLHS = ~LHSKnown.Zero;
+ MaxLHS.clearSignBit();
+ APInt MaxRHS = ~RHSKnown.Zero;
+ MaxRHS.clearSignBit();
+ APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
+ return Result.isSignBitClear();
+ }
+
+ // If either of the values is known to be negative, adding them can only
+ // overflow if the second is also negative, so we can assume that.
+ // Two negative number will only overflow if there is no carry to the sign
+ // bit, so we can check if even when the values are as small as possible
+ // there is overflow to the sign bit.
+ if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
+ APInt MinLHS = LHSKnown.One;
+ MinLHS.clearSignBit();
+ APInt MinRHS = RHSKnown.One;
+ MinRHS.clearSignBit();
+ APInt Result = std::move(MinLHS) + std::move(MinRHS);
+ return Result.isSignBitSet();
+ }
+
+ // If we reached here it means that we know nothing about the sign bits.
+ // In this case we can't know if there will be an overflow, since by
+ // changing the sign bits any two values can be made to overflow.
+ return false;
}
/// Return true if we can prove that:
@@ -906,16 +926,8 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
KnownBits RHSKnown(BitWidth);
computeKnownBits(RHS, RHSKnown, 0, &CxtI);
- // Addition of two 2's complement numbers having opposite signs will never
- // overflow.
- if ((LHSKnown.One[BitWidth - 1] && RHSKnown.Zero[BitWidth - 1]) ||
- (LHSKnown.Zero[BitWidth - 1] && RHSKnown.One[BitWidth - 1]))
- return true;
-
// Check if carry bit of addition will not cause overflow.
- if (checkRippleForAdd(LHSKnown.Zero, RHSKnown.Zero))
- return true;
- if (checkRippleForAdd(RHSKnown.Zero, LHSKnown.Zero))
+ if (checkRippleForAdd(LHSKnown, RHSKnown))
return true;
return false;
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index c7092bf3a398..b114801cc1c0 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1834,25 +1834,8 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
break;
- case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
- case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
- return RHS;
}
break;
- case ICmpInst::ICMP_NE:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
- case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
- case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
- return LHS;
- case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
- case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
- case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
- return Builder->getTrue();
- }
case ICmpInst::ICMP_ULT:
switch (PredR) {
default:
@@ -1860,15 +1843,9 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
break;
case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
- // If RHSC is [us]MAXINT, it is always false. Not handling
- // this can cause overflow.
- if (RHSC->isMaxValue(false))
- return LHS;
+ assert(!RHSC->isMaxValue(false) && "Missed icmp simplification");
return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1,
false, false);
- case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
- return RHS;
}
break;
case ICmpInst::ICMP_SLT:
@@ -1878,39 +1855,9 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
break;
case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
- // If RHSC is [us]MAXINT, it is always false. Not handling
- // this can cause overflow.
- if (RHSC->isMaxValue(true))
- return LHS;
+ assert(!RHSC->isMaxValue(true) && "Missed icmp simplification");
return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true,
false);
- case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
- return RHS;
- }
- break;
- case ICmpInst::ICMP_UGT:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
- case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
- return LHS;
- case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
- case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
- return Builder->getTrue();
- }
- break;
- case ICmpInst::ICMP_SGT:
- switch (PredR) {
- default:
- llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
- case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
- return LHS;
- case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
- case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
- return Builder->getTrue();
}
break;
}
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 4fd90d78a63b..6989d67f0060 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3619,7 +3619,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// then this one is redundant, and should be removed.
KnownBits Known(1);
computeKnownBits(IIOperand, Known, 0, II);
- if (Known.One.isAllOnesValue())
+ if (Known.isAllOnes())
return eraseInstFromFunction(*II);
// Update the cache of affected values for this assumption (we might be
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 60970775de63..34ce235b3fe2 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4050,7 +4050,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
// is set. If the comparison is against zero, then this is a check to see if
// *that* bit is set.
APInt Op0KnownZeroInverted = ~Op0Known.Zero;
- if (~Op1Known.Zero == 0) {
+ if (Op1Known.isZero()) {
// If the LHS is an AND with the same constant, look through it.
Value *LHS = nullptr;
const APInt *LHSC;
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 0195c5e727c9..05b01774cd5e 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -120,8 +120,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return nullptr;
}
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
if (DemandedMask == 0) // Not demanding any bits from V.
return UndefValue::get(VTy);
@@ -329,13 +328,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
case Instruction::Trunc: {
unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask = DemandedMask.zext(truncBf);
- Known.Zero = Known.Zero.zext(truncBf);
- Known.One = Known.One.zext(truncBf);
+ Known = Known.zext(truncBf);
if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
return I;
DemandedMask = DemandedMask.trunc(BitWidth);
- Known.Zero = Known.Zero.trunc(BitWidth);
- Known.One = Known.One.trunc(BitWidth);
+ Known = Known.trunc(BitWidth);
assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
break;
}
@@ -365,13 +362,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
DemandedMask = DemandedMask.trunc(SrcBitWidth);
- Known.Zero = Known.Zero.trunc(SrcBitWidth);
- Known.One = Known.One.trunc(SrcBitWidth);
+ Known = Known.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
return I;
DemandedMask = DemandedMask.zext(BitWidth);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
// The top bits are known to be zero.
Known.Zero.setBitsFrom(SrcBitWidth);
@@ -391,13 +386,11 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
InputDemandedBits.setBit(SrcBitWidth-1);
InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
- Known.Zero = Known.Zero.trunc(SrcBitWidth);
- Known.One = Known.One.trunc(SrcBitWidth);
+ Known = Known.trunc(SrcBitWidth);
if (SimplifyDemandedBits(I, 0, InputDemandedBits, Known, Depth + 1))
return I;
InputDemandedBits = InputDemandedBits.zext(BitWidth);
- Known.Zero = Known.Zero.zext(BitWidth);
- Known.One = Known.One.zext(BitWidth);
+ Known = Known.zext(BitWidth);
assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
// If the sign bit of the input is known set or clear, then we know the
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 1eb98b18bfb5..1792cb585f87 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2182,8 +2182,8 @@ Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
// determine the value. If so, constant fold it.
KnownBits Known(VTy->getPrimitiveSizeInBits());
computeKnownBits(ResultOp, Known, 0, &RI);
- if ((Known.Zero|Known.One).isAllOnesValue())
- RI.setOperand(0, Constant::getIntegerValue(VTy, Known.One));
+ if (Known.isConstant())
+ RI.setOperand(0, Constant::getIntegerValue(VTy, Known.getConstant()));
return nullptr;
}
@@ -2863,8 +2863,8 @@ bool InstCombiner::run() {
unsigned BitWidth = Ty->getScalarSizeInBits();
KnownBits Known(BitWidth);
computeKnownBits(I, Known, /*Depth*/0, I);
- if ((Known.Zero | Known.One).isAllOnesValue()) {
- Constant *C = ConstantInt::get(Ty, Known.One);
+ if (Known.isConstant()) {
+ Constant *C = ConstantInt::get(Ty, Known.getConstant());
DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
" from: " << *I << '\n');
diff --git a/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index 493d014586c6..96027bc3d0a9 100644
--- a/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -70,13 +70,13 @@ static cl::opt<bool> DisableICP("disable-icp", cl::init(false), cl::Hidden,
// For debug use only.
static cl::opt<unsigned>
ICPCutOff("icp-cutoff", cl::init(0), cl::Hidden, cl::ZeroOrMore,
- cl::desc("Max number of promotions for this compilaiton"));
+ cl::desc("Max number of promotions for this compilation"));
// If ICPCSSkip is non zero, the first ICPCSSkip callsites will be skipped.
// For debug use only.
static cl::opt<unsigned>
ICPCSSkip("icp-csskip", cl::init(0), cl::Hidden, cl::ZeroOrMore,
- cl::desc("Skip Callsite up to this number for this compilaiton"));
+ cl::desc("Skip Callsite up to this number for this compilation"));
// Set if the pass is called in LTO optimization. The difference for LTO mode
// is the pass won't prefix the source module name to the internal linkage
diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 8bdd917a0596..4bc0a7133118 100644
--- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -151,6 +151,7 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
Options.TraceGep |= ClGEPTracing;
Options.TracePC |= ClExperimentalTracePC;
Options.TracePCGuard |= ClTracePCGuard;
+ Options.NoPrune |= !ClPruneBlocks;
return Options;
}
@@ -380,8 +381,10 @@ static bool isFullPostDominator(const BasicBlock *BB,
return true;
}
-static bool shouldInstrumentBlock(const Function& F, const BasicBlock *BB, const DominatorTree *DT,
- const PostDominatorTree *PDT) {
+static bool shouldInstrumentBlock(const Function &F, const BasicBlock *BB,
+ const DominatorTree *DT,
+ const PostDominatorTree *PDT,
+ const SanitizerCoverageOptions &Options) {
// Don't insert coverage for unreachable blocks: we will never call
// __sanitizer_cov() for them, so counting them in
// NumberOfInstrumentedBlocks() might complicate calculation of code coverage
@@ -395,7 +398,7 @@ static bool shouldInstrumentBlock(const Function& F, const BasicBlock *BB, const
if (BB->getFirstInsertionPt() == BB->end())
return false;
- if (!ClPruneBlocks || &F.getEntryBlock() == BB)
+ if (Options.NoPrune || &F.getEntryBlock() == BB)
return true;
return !(isFullDominator(BB, DT) || isFullPostDominator(BB, PDT));
@@ -434,7 +437,7 @@ bool SanitizerCoverageModule::runOnFunction(Function &F) {
&getAnalysis<PostDominatorTreeWrapperPass>(F).getPostDomTree();
for (auto &BB : F) {
- if (shouldInstrumentBlock(F, &BB, DT, PDT))
+ if (shouldInstrumentBlock(F, &BB, DT, PDT, Options))
BlocksToInstrument.push_back(&BB);
for (auto &Inst : BB) {
if (Options.IndirectCalls) {
diff --git a/lib/Transforms/Scalar/Float2Int.cpp b/lib/Transforms/Scalar/Float2Int.cpp
index 8a5af6195f1b..b105ece8dc7c 100644
--- a/lib/Transforms/Scalar/Float2Int.cpp
+++ b/lib/Transforms/Scalar/Float2Int.cpp
@@ -137,13 +137,13 @@ void Float2IntPass::findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots) {
}
// Helper - mark I as having been traversed, having range R.
-ConstantRange Float2IntPass::seen(Instruction *I, ConstantRange R) {
+void Float2IntPass::seen(Instruction *I, ConstantRange R) {
DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n");
- if (SeenInsts.find(I) != SeenInsts.end())
- SeenInsts.find(I)->second = R;
+ auto IT = SeenInsts.find(I);
+ if (IT != SeenInsts.end())
+ IT->second = std::move(R);
else
- SeenInsts.insert(std::make_pair(I, R));
- return R;
+ SeenInsts.insert(std::make_pair(I, std::move(R)));
}
// Helper - get a range representing a poison value.
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 7dacaba1193e..ae353ea44595 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -580,17 +580,17 @@ bool JumpThreadingPass::ComputeValueKnownInPredecessors(
// If comparing a live-in value against a constant, see if we know the
// live-in value on any predecessors.
- if (isa<Constant>(Cmp->getOperand(1)) && Cmp->getType()->isIntegerTy()) {
+ if (isa<Constant>(Cmp->getOperand(1)) && !Cmp->getType()->isVectorTy()) {
+ Constant *CmpConst = cast<Constant>(Cmp->getOperand(1));
+
if (!isa<Instruction>(Cmp->getOperand(0)) ||
cast<Instruction>(Cmp->getOperand(0))->getParent() != BB) {
- Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
-
for (BasicBlock *P : predecessors(BB)) {
// If the value is known by LazyValueInfo to be a constant in a
// predecessor, use that information to try to thread this block.
LazyValueInfo::Tristate Res =
LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
- RHSCst, P, BB, CxtI ? CxtI : Cmp);
+ CmpConst, P, BB, CxtI ? CxtI : Cmp);
if (Res == LazyValueInfo::Unknown)
continue;
@@ -603,21 +603,19 @@ bool JumpThreadingPass::ComputeValueKnownInPredecessors(
// Try to find a constant value for the LHS of a comparison,
// and evaluate it statically if we can.
- if (Constant *CmpConst = dyn_cast<Constant>(Cmp->getOperand(1))) {
- PredValueInfoTy LHSVals;
- ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
- WantInteger, CxtI);
-
- for (const auto &LHSVal : LHSVals) {
- Constant *V = LHSVal.first;
- Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
- V, CmpConst);
- if (Constant *KC = getKnownConstant(Folded, WantInteger))
- Result.push_back(std::make_pair(KC, LHSVal.second));
- }
+ PredValueInfoTy LHSVals;
+ ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
+ WantInteger, CxtI);
- return !Result.empty();
+ for (const auto &LHSVal : LHSVals) {
+ Constant *V = LHSVal.first;
+ Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
+ V, CmpConst);
+ if (Constant *KC = getKnownConstant(Folded, WantInteger))
+ Result.push_back(std::make_pair(KC, LHSVal.second));
}
+
+ return !Result.empty();
}
}
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 410fbb03068f..48d5ae88cda9 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -783,6 +783,11 @@ bool LoopIdiomRecognize::processLoopStridedStore(
if (NegStride)
Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
+ // TODO: ideally we should still be able to generate memset if SCEV expander
+ // is taught to generate the dependencies at the latest point.
+ if (!isSafeToExpand(Start, *SE))
+ return false;
+
// Okay, we have a strided store "p[i]" of a splattable value. We can turn
// this into a memset in the loop preheader now if we want. However, this
// would be unsafe to do if there is anything else in the loop that may read
@@ -814,6 +819,11 @@ bool LoopIdiomRecognize::processLoopStridedStore(
SCEV::FlagNUW);
}
+ // TODO: ideally we should still be able to generate memset if SCEV expander
+ // is taught to generate the dependencies at the latest point.
+ if (!isSafeToExpand(NumBytesS, *SE))
+ return false;
+
Value *NumBytes =
Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
diff --git a/lib/Transforms/Scalar/NewGVN.cpp b/lib/Transforms/Scalar/NewGVN.cpp
index 62b5d80d611b..3c9850b156ac 100644
--- a/lib/Transforms/Scalar/NewGVN.cpp
+++ b/lib/Transforms/Scalar/NewGVN.cpp
@@ -2494,12 +2494,11 @@ void NewGVN::verifyMemoryCongruency() const {
continue;
if (CC->getStoreCount() != 0) {
assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
- "Any class with a store as a "
- "leader should have a "
- "representative stored value\n");
+ "Any class with a store as a leader should have a "
+ "representative stored value");
assert(CC->getMemoryLeader() &&
- "Any congruence class with a store should "
- "have a representative access\n");
+ "Any congruence class with a store should have a "
+ "representative access");
}
if (CC->getMemoryLeader())
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index a6b9fee1d8ac..bf54a51c7635 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -492,11 +492,10 @@ static CallInst *findTRECandidate(Instruction *TI,
return CI;
}
-static bool eliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
- BasicBlock *&OldEntry,
- bool &TailCallsAreMarkedTail,
- SmallVectorImpl<PHINode *> &ArgumentPHIs,
- bool CannotTailCallElimCallsMarkedTail) {
+static bool
+eliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret, BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs) {
// If we are introducing accumulator recursion to eliminate operations after
// the call instruction that are both associative and commutative, the initial
// value for the accumulator is placed in this variable. If this value is set
@@ -707,8 +706,7 @@ static bool foldReturnAndProcessPred(BasicBlock *BB, ReturnInst *Ret,
BB->eraseFromParent();
eliminateRecursiveTailCall(CI, RI, OldEntry, TailCallsAreMarkedTail,
- ArgumentPHIs,
- CannotTailCallElimCallsMarkedTail);
+ ArgumentPHIs);
++NumRetDuped;
Change = true;
}
@@ -727,8 +725,7 @@ static bool processReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
return false;
return eliminateRecursiveTailCall(CI, Ret, OldEntry, TailCallsAreMarkedTail,
- ArgumentPHIs,
- CannotTailCallElimCallsMarkedTail);
+ ArgumentPHIs);
}
static bool eliminateTailRecursion(Function &F, const TargetTransformInfo *TTI) {
diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp
index 1956697ccb8b..ebde1f9a17dd 100644
--- a/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -113,6 +113,7 @@ bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
bool Changed = false;
switch (TheLibFunc) {
case LibFunc_strlen:
+ case LibFunc_wcslen:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 7a3e8b9ae915..b44bc74d6551 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -596,7 +596,7 @@ private:
Span = Span.inverse();
// If there are a ton of values, we don't want to make a ginormous switch.
- if (Span.getSetSize().ugt(8) || Span.isEmptySet()) {
+ if (Span.isSizeLargerThan(8) || Span.isEmptySet()) {
return false;
}
diff --git a/lib/Transforms/Utils/ValueMapper.cpp b/lib/Transforms/Utils/ValueMapper.cpp
index f77c10b6dd47..84d89f103a2f 100644
--- a/lib/Transforms/Utils/ValueMapper.cpp
+++ b/lib/Transforms/Utils/ValueMapper.cpp
@@ -121,6 +121,8 @@ public:
void addFlags(RemapFlags Flags);
+ void remapGlobalObjectMetadata(GlobalObject &GO);
+
Value *mapValue(const Value *V);
void remapInstruction(Instruction *I);
void remapFunction(Function &F);
@@ -802,6 +804,7 @@ void Mapper::flush() {
switch (E.Kind) {
case WorklistEntry::MapGlobalInit:
E.Data.GVInit.GV->setInitializer(mapConstant(E.Data.GVInit.Init));
+ remapGlobalObjectMetadata(*E.Data.GVInit.GV);
break;
case WorklistEntry::MapAppendingVar: {
unsigned PrefixSize = AppendingInits.size() - E.AppendingGVNumNewMembers;
@@ -892,6 +895,14 @@ void Mapper::remapInstruction(Instruction *I) {
I->mutateType(TypeMapper->remapType(I->getType()));
}
+void Mapper::remapGlobalObjectMetadata(GlobalObject &GO) {
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
+ GO.getAllMetadata(MDs);
+ GO.clearMetadata();
+ for (const auto &I : MDs)
+ GO.addMetadata(I.first, *cast<MDNode>(mapMetadata(I.second)));
+}
+
void Mapper::remapFunction(Function &F) {
// Remap the operands.
for (Use &Op : F.operands())
@@ -899,11 +910,7 @@ void Mapper::remapFunction(Function &F) {
Op = mapValue(Op);
// Remap the metadata attachments.
- SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
- F.getAllMetadata(MDs);
- F.clearMetadata();
- for (const auto &I : MDs)
- F.addMetadata(I.first, *cast<MDNode>(mapMetadata(I.second)));
+ remapGlobalObjectMetadata(F);
// Remap the argument types.
if (TypeMapper)
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 87ce0194dad6..3fde0a453962 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7178,7 +7178,7 @@ unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
if (VF == 1) {
Type *ValTy = getMemInstValueType(I);
unsigned Alignment = getMemInstAlignment(I);
- unsigned AS = getMemInstAlignment(I);
+ unsigned AS = getMemInstAddressSpace(I);
return TTI.getAddressComputationCost(ValTy) +
TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
diff --git a/test/Analysis/CostModel/X86/bitreverse.ll b/test/Analysis/CostModel/X86/bitreverse.ll
index 2eb63babdc34..8d5e1421eb82 100644
--- a/test/Analysis/CostModel/X86/bitreverse.ll
+++ b/test/Analysis/CostModel/X86/bitreverse.ll
@@ -79,7 +79,7 @@ define <4 x i64> @var_bitreverse_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v4i64':
; SSE2: Found an estimated cost of 58 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
@@ -101,7 +101,7 @@ define <8 x i32> @var_bitreverse_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v8i32':
; SSE2: Found an estimated cost of 54 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
@@ -123,7 +123,7 @@ define <16 x i16> @var_bitreverse_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v16i16':
; SSE2: Found an estimated cost of 54 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
@@ -145,7 +145,7 @@ define <32 x i8> @var_bitreverse_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_bitreverse_v32i8':
; SSE2: Found an estimated cost of 40 for instruction: %bitreverse
; SSE42: Found an estimated cost of 10 for instruction: %bitreverse
-; AVX: Found an estimated cost of 10 for instruction: %bitreverse
+; AVX: Found an estimated cost of 12 for instruction: %bitreverse
; AVX2: Found an estimated cost of 5 for instruction: %bitreverse
; XOP: Found an estimated cost of 4 for instruction: %bitreverse
%bitreverse = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a)
diff --git a/test/Analysis/CostModel/X86/ctbits-cost.ll b/test/Analysis/CostModel/X86/ctbits-cost.ll
index 8c7fa9d73151..aaf092c7b1d7 100644
--- a/test/Analysis/CostModel/X86/ctbits-cost.ll
+++ b/test/Analysis/CostModel/X86/ctbits-cost.ll
@@ -69,7 +69,7 @@ define <4 x i64> @var_ctpop_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v4i64':
; SSE2: Found an estimated cost of 24 for instruction: %ctpop
; SSE42: Found an estimated cost of 14 for instruction: %ctpop
-; AVX1: Found an estimated cost of 14 for instruction: %ctpop
+; AVX1: Found an estimated cost of 16 for instruction: %ctpop
; AVX2: Found an estimated cost of 7 for instruction: %ctpop
%ctpop = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a)
ret <4 x i64> %ctpop
@@ -88,7 +88,7 @@ define <8 x i32> @var_ctpop_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v8i32':
; SSE2: Found an estimated cost of 30 for instruction: %ctpop
; SSE42: Found an estimated cost of 22 for instruction: %ctpop
-; AVX1: Found an estimated cost of 22 for instruction: %ctpop
+; AVX1: Found an estimated cost of 24 for instruction: %ctpop
; AVX2: Found an estimated cost of 11 for instruction: %ctpop
%ctpop = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %a)
ret <8 x i32> %ctpop
@@ -107,7 +107,7 @@ define <16 x i16> @var_ctpop_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v16i16':
; SSE2: Found an estimated cost of 26 for instruction: %ctpop
; SSE42: Found an estimated cost of 18 for instruction: %ctpop
-; AVX1: Found an estimated cost of 18 for instruction: %ctpop
+; AVX1: Found an estimated cost of 20 for instruction: %ctpop
; AVX2: Found an estimated cost of 9 for instruction: %ctpop
%ctpop = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %a)
ret <16 x i16> %ctpop
@@ -126,7 +126,7 @@ define <32 x i8> @var_ctpop_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctpop_v32i8':
; SSE2: Found an estimated cost of 20 for instruction: %ctpop
; SSE42: Found an estimated cost of 12 for instruction: %ctpop
-; AVX1: Found an estimated cost of 12 for instruction: %ctpop
+; AVX1: Found an estimated cost of 14 for instruction: %ctpop
; AVX2: Found an estimated cost of 6 for instruction: %ctpop
%ctpop = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %a)
ret <32 x i8> %ctpop
@@ -229,7 +229,7 @@ define <4 x i64> @var_ctlz_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v4i64':
; SSE2: Found an estimated cost of 50 for instruction: %ctlz
; SSE42: Found an estimated cost of 46 for instruction: %ctlz
-; AVX1: Found an estimated cost of 46 for instruction: %ctlz
+; AVX1: Found an estimated cost of 48 for instruction: %ctlz
; AVX2: Found an estimated cost of 23 for instruction: %ctlz
%ctlz = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 0)
ret <4 x i64> %ctlz
@@ -239,7 +239,7 @@ define <4 x i64> @var_ctlz_v4i64u(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v4i64u':
; SSE2: Found an estimated cost of 50 for instruction: %ctlz
; SSE42: Found an estimated cost of 46 for instruction: %ctlz
-; AVX1: Found an estimated cost of 46 for instruction: %ctlz
+; AVX1: Found an estimated cost of 48 for instruction: %ctlz
; AVX2: Found an estimated cost of 23 for instruction: %ctlz
%ctlz = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 1)
ret <4 x i64> %ctlz
@@ -267,7 +267,7 @@ define <8 x i32> @var_ctlz_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v8i32':
; SSE2: Found an estimated cost of 52 for instruction: %ctlz
; SSE42: Found an estimated cost of 36 for instruction: %ctlz
-; AVX1: Found an estimated cost of 36 for instruction: %ctlz
+; AVX1: Found an estimated cost of 38 for instruction: %ctlz
; AVX2: Found an estimated cost of 18 for instruction: %ctlz
%ctlz = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 0)
ret <8 x i32> %ctlz
@@ -277,7 +277,7 @@ define <8 x i32> @var_ctlz_v8i32u(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v8i32u':
; SSE2: Found an estimated cost of 52 for instruction: %ctlz
; SSE42: Found an estimated cost of 36 for instruction: %ctlz
-; AVX1: Found an estimated cost of 36 for instruction: %ctlz
+; AVX1: Found an estimated cost of 38 for instruction: %ctlz
; AVX2: Found an estimated cost of 18 for instruction: %ctlz
%ctlz = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 1)
ret <8 x i32> %ctlz
@@ -305,7 +305,7 @@ define <16 x i16> @var_ctlz_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v16i16':
; SSE2: Found an estimated cost of 40 for instruction: %ctlz
; SSE42: Found an estimated cost of 28 for instruction: %ctlz
-; AVX1: Found an estimated cost of 28 for instruction: %ctlz
+; AVX1: Found an estimated cost of 30 for instruction: %ctlz
; AVX2: Found an estimated cost of 14 for instruction: %ctlz
%ctlz = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 0)
ret <16 x i16> %ctlz
@@ -315,7 +315,7 @@ define <16 x i16> @var_ctlz_v16i16u(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v16i16u':
; SSE2: Found an estimated cost of 40 for instruction: %ctlz
; SSE42: Found an estimated cost of 28 for instruction: %ctlz
-; AVX1: Found an estimated cost of 28 for instruction: %ctlz
+; AVX1: Found an estimated cost of 30 for instruction: %ctlz
; AVX2: Found an estimated cost of 14 for instruction: %ctlz
%ctlz = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 1)
ret <16 x i16> %ctlz
@@ -343,7 +343,7 @@ define <32 x i8> @var_ctlz_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v32i8':
; SSE2: Found an estimated cost of 34 for instruction: %ctlz
; SSE42: Found an estimated cost of 18 for instruction: %ctlz
-; AVX1: Found an estimated cost of 18 for instruction: %ctlz
+; AVX1: Found an estimated cost of 20 for instruction: %ctlz
; AVX2: Found an estimated cost of 9 for instruction: %ctlz
%ctlz = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 0)
ret <32 x i8> %ctlz
@@ -353,7 +353,7 @@ define <32 x i8> @var_ctlz_v32i8u(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_ctlz_v32i8u':
; SSE2: Found an estimated cost of 34 for instruction: %ctlz
; SSE42: Found an estimated cost of 18 for instruction: %ctlz
-; AVX1: Found an estimated cost of 18 for instruction: %ctlz
+; AVX1: Found an estimated cost of 20 for instruction: %ctlz
; AVX2: Found an estimated cost of 9 for instruction: %ctlz
%ctlz = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 1)
ret <32 x i8> %ctlz
@@ -456,7 +456,7 @@ define <4 x i64> @var_cttz_v4i64(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v4i64':
; SSE2: Found an estimated cost of 28 for instruction: %cttz
; SSE42: Found an estimated cost of 20 for instruction: %cttz
-; AVX1: Found an estimated cost of 20 for instruction: %cttz
+; AVX1: Found an estimated cost of 22 for instruction: %cttz
; AVX2: Found an estimated cost of 10 for instruction: %cttz
%cttz = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %a, i1 0)
ret <4 x i64> %cttz
@@ -466,7 +466,7 @@ define <4 x i64> @var_cttz_v4i64u(<4 x i64> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v4i64u':
; SSE2: Found an estimated cost of 28 for instruction: %cttz
; SSE42: Found an estimated cost of 20 for instruction: %cttz
-; AVX1: Found an estimated cost of 20 for instruction: %cttz
+; AVX1: Found an estimated cost of 22 for instruction: %cttz
; AVX2: Found an estimated cost of 10 for instruction: %cttz
%cttz = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %a, i1 1)
ret <4 x i64> %cttz
@@ -494,7 +494,7 @@ define <8 x i32> @var_cttz_v8i32(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v8i32':
; SSE2: Found an estimated cost of 36 for instruction: %cttz
; SSE42: Found an estimated cost of 28 for instruction: %cttz
-; AVX1: Found an estimated cost of 28 for instruction: %cttz
+; AVX1: Found an estimated cost of 30 for instruction: %cttz
; AVX2: Found an estimated cost of 14 for instruction: %cttz
%cttz = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %a, i1 0)
ret <8 x i32> %cttz
@@ -504,7 +504,7 @@ define <8 x i32> @var_cttz_v8i32u(<8 x i32> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v8i32u':
; SSE2: Found an estimated cost of 36 for instruction: %cttz
; SSE42: Found an estimated cost of 28 for instruction: %cttz
-; AVX1: Found an estimated cost of 28 for instruction: %cttz
+; AVX1: Found an estimated cost of 30 for instruction: %cttz
; AVX2: Found an estimated cost of 14 for instruction: %cttz
%cttz = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %a, i1 1)
ret <8 x i32> %cttz
@@ -532,7 +532,7 @@ define <16 x i16> @var_cttz_v16i16(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v16i16':
; SSE2: Found an estimated cost of 32 for instruction: %cttz
; SSE42: Found an estimated cost of 24 for instruction: %cttz
-; AVX1: Found an estimated cost of 24 for instruction: %cttz
+; AVX1: Found an estimated cost of 26 for instruction: %cttz
; AVX2: Found an estimated cost of 12 for instruction: %cttz
%cttz = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %a, i1 0)
ret <16 x i16> %cttz
@@ -542,7 +542,7 @@ define <16 x i16> @var_cttz_v16i16u(<16 x i16> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v16i16u':
; SSE2: Found an estimated cost of 32 for instruction: %cttz
; SSE42: Found an estimated cost of 24 for instruction: %cttz
-; AVX1: Found an estimated cost of 24 for instruction: %cttz
+; AVX1: Found an estimated cost of 26 for instruction: %cttz
; AVX2: Found an estimated cost of 12 for instruction: %cttz
%cttz = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %a, i1 1)
ret <16 x i16> %cttz
@@ -570,7 +570,7 @@ define <32 x i8> @var_cttz_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v32i8':
; SSE2: Found an estimated cost of 26 for instruction: %cttz
; SSE42: Found an estimated cost of 18 for instruction: %cttz
-; AVX1: Found an estimated cost of 18 for instruction: %cttz
+; AVX1: Found an estimated cost of 20 for instruction: %cttz
; AVX2: Found an estimated cost of 9 for instruction: %cttz
%cttz = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %a, i1 0)
ret <32 x i8> %cttz
@@ -580,7 +580,7 @@ define <32 x i8> @var_cttz_v32i8u(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'var_cttz_v32i8u':
; SSE2: Found an estimated cost of 26 for instruction: %cttz
; SSE42: Found an estimated cost of 18 for instruction: %cttz
-; AVX1: Found an estimated cost of 18 for instruction: %cttz
+; AVX1: Found an estimated cost of 20 for instruction: %cttz
; AVX2: Found an estimated cost of 9 for instruction: %cttz
%cttz = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %a, i1 1)
ret <32 x i8> %cttz
diff --git a/test/Analysis/ScalarEvolution/ZeroStep.ll b/test/Analysis/ScalarEvolution/ZeroStep.ll
new file mode 100644
index 000000000000..fc6ed018e903
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/ZeroStep.ll
@@ -0,0 +1,18 @@
+; RUN: opt -analyze -scalar-evolution < %s -o - -S | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; Test that SCEV is capable of figuring out value of 'IV' that actually does not change.
+; CHECK: Classifying expressions for: @foo
+; CHECK: %iv.i = phi i64
+; CHECK: -5 U: [-5,-4) S: [-5,-4) Exits: -5 LoopDispositions: { %loop: Invariant }
+define void @foo() {
+entry:
+ br label %loop
+
+loop:
+ %iv.i = phi i64 [ -5, %entry ], [ %iv.next.i, %loop ]
+ %iv.next.i = add nsw i64 %iv.i, 0
+ br label %loop
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 02848021dbc0..ac3d4b17f739 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1541,3 +1541,12 @@ define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2)
%res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
ret <16 x i8> %res
}
+
+; CHECK-LABEL: test_constant_vector
+; CHECK: [[UNDEF:%[0-9]+]](s16) = IMPLICIT_DEF
+; CHECK: [[F:%[0-9]+]](s16) = G_FCONSTANT half 0xH3C00
+; CHECK: [[M:%[0-9]+]](<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
+; CHECK: %d0 = COPY [[M]](<4 x s16>)
+define <4 x half> @test_constant_vector() {
+ ret <4 x half> <half undef, half undef, half undef, half 0xH3C00>
+}
diff --git a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
index 5a76661180f2..e01bd2a9f7c8 100644
--- a/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
+++ b/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
@@ -12,33 +12,33 @@ entry:
store i32 %in, i32* %in.addr, align 4
call void @llvm.dbg.declare(metadata i32* %in.addr, metadata !11, metadata !12), !dbg !13
call void @llvm.dbg.declare(metadata i32 %in, metadata !11, metadata !12), !dbg !13
- ret void, !dbg !14
+ ret void, !dbg !13
}
; CHECK-LABEL: name: debug_declare_vla
-; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !11, !12, debug-location !13
-define void @debug_declare_vla(i32 %in) #0 !dbg !7 {
+; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use _, !15, !12, debug-location !16
+define void @debug_declare_vla(i32 %in) #0 !dbg !14 {
entry:
%vla.addr = alloca i32, i32 %in
- call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !11, metadata !12), !dbg !13
- ret void, !dbg !14
+ call void @llvm.dbg.declare(metadata i32* %vla.addr, metadata !15, metadata !12), !dbg !16
+ ret void, !dbg !16
}
; CHECK-LABEL: name: debug_value
; CHECK: [[IN:%[0-9]+]](s32) = COPY %w0
-define void @debug_value(i32 %in) #0 !dbg !7 {
+define void @debug_value(i32 %in) #0 !dbg !17 {
%addr = alloca i32
-; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !11, metadata !12), !dbg !13
+; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use _, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !18, metadata !12), !dbg !19
store i32 %in, i32* %addr
-; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !11, !15, debug-location !13
- call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !11, metadata !15), !dbg !13
-; CHECK: DBG_VALUE 123, 0, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !11, metadata !12), !dbg !13
-; CHECK: DBG_VALUE float 1.000000e+00, 0, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !11, metadata !12), !dbg !13
-; CHECK: DBG_VALUE _, 0, !11, !12, debug-location !13
- call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !11, metadata !12), !dbg !13
+; CHECK: DBG_VALUE debug-use %1(p0), debug-use _, !18, !20, debug-location !19
+ call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !18, metadata !20), !dbg !19
+; CHECK: DBG_VALUE 123, 0, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !18, metadata !12), !dbg !19
+; CHECK: DBG_VALUE float 1.000000e+00, 0, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !18, metadata !12), !dbg !19
+; CHECK: DBG_VALUE _, 0, !18, !12, debug-location !19
+ call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !18, metadata !12), !dbg !19
ret void
}
@@ -64,5 +64,10 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
!11 = !DILocalVariable(name: "in", arg: 1, scope: !7, file: !1, line: 1, type: !10)
!12 = !DIExpression()
!13 = !DILocation(line: 1, column: 14, scope: !7)
-!14 = !DILocation(line: 2, column: 1, scope: !7)
-!15 = !DIExpression(DW_OP_deref)
+!14 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!15 = !DILocalVariable(name: "in", arg: 1, scope: !14, file: !1, line: 1, type: !10)
+!16 = !DILocation(line: 1, column: 14, scope: !14)
+!17 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!18 = !DILocalVariable(name: "in", arg: 1, scope: !17, file: !1, line: 1, type: !10)
+!19 = !DILocation(line: 1, column: 14, scope: !17)
+!20 = !DIExpression(DW_OP_deref)
diff --git a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
index 2f36ec8d2aaa..790cd6517dd3 100644
--- a/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
+++ b/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
@@ -9,8 +9,8 @@
ret void
}
- define void @test_dbg_value_dead(i32 %a) !dbg !5 {
- call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !7, metadata !9), !dbg !10
+ define void @test_dbg_value_dead(i32 %a) !dbg !11 {
+ call void @llvm.dbg.value(metadata i32 0, i64 0, metadata !12, metadata !9), !dbg !13
ret void
}
@@ -30,6 +30,9 @@
!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!9 = !DIExpression()
!10 = !DILocation(line: 1, column: 1, scope: !5)
+ !11 = distinct !DISubprogram(name: "test_dbg_value", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+ !12 = !DILocalVariable(name: "in", arg: 1, scope: !11, file: !1, line: 1, type: !8)
+ !13 = !DILocation(line: 1, column: 1, scope: !11)
...
---
diff --git a/test/CodeGen/AArch64/fadd-combines.ll b/test/CodeGen/AArch64/fadd-combines.ll
new file mode 100644
index 000000000000..c106f293ccff
--- /dev/null
+++ b/test/CodeGen/AArch64/fadd-combines.ll
@@ -0,0 +1,78 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -verify-machineinstrs | FileCheck %s
+
+; CHECK-LABEL: test1:
+; CHECK: fadd d1, d1, d1
+; CHECK: fsub d0, d0, d1
+define double @test1(double %a, double %b) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %b, -2.000000e+00
+ %add1 = fadd double %a, %mul
+ ret double %add1
+}
+
+; DAGCombine will canonicalize 'a - 2.0*b' to 'a + -2.0*b'
+; CHECK-LABEL: test2:
+; CHECK: fadd d1, d1, d1
+; CHECK: fsub d0, d0, d1
+define double @test2(double %a, double %b) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %b, 2.000000e+00
+ %add1 = fsub double %a, %mul
+ ret double %add1
+}
+
+; CHECK-LABEL: test3:
+; CHECK: fmul d0, d0, d1
+; CHECK: fadd d1, d2, d2
+; CHECK: fsub d0, d0, d1
+define double @test3(double %a, double %b, double %c) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %a, %b
+ %mul1 = fmul double %c, 2.000000e+00
+ %sub = fsub double %mul, %mul1
+ ret double %sub
+}
+
+; CHECK-LABEL: test4:
+; CHECK: fmul d0, d0, d1
+; CHECK: fadd d1, d2, d2
+; CHECK: fsub d0, d0, d1
+define double @test4(double %a, double %b, double %c) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %a, %b
+ %mul1 = fmul double %c, -2.000000e+00
+ %add2 = fadd double %mul, %mul1
+ ret double %add2
+}
+
+; CHECK-LABEL: test5:
+; CHECK: fadd v1.4s, v1.4s, v1.4s
+; CHECK: fsub v0.4s, v0.4s, v1.4s
+define <4 x float> @test5(<4 x float> %a, <4 x float> %b) {
+ %mul = fmul <4 x float> %b, <float -2.0, float -2.0, float -2.0, float -2.0>
+ %add = fadd <4 x float> %a, %mul
+ ret <4 x float> %add
+}
+
+; CHECK-LABEL: test6:
+; CHECK: fadd v1.4s, v1.4s, v1.4s
+; CHECK: fsub v0.4s, v0.4s, v1.4s
+define <4 x float> @test6(<4 x float> %a, <4 x float> %b) {
+ %mul = fmul <4 x float> %b, <float 2.0, float 2.0, float 2.0, float 2.0>
+ %add = fsub <4 x float> %a, %mul
+ ret <4 x float> %add
+}
+
+; Don't fold (fadd A, (fmul B, -2.0)) -> (fsub A, (fadd B, B)) if the fmul has
+; multiple uses.
+; CHECK-LABEL: test7:
+; CHECK: fmul
+define double @test7(double %a, double %b) local_unnamed_addr #0 {
+entry:
+ %mul = fmul double %b, -2.000000e+00
+ %add1 = fadd double %a, %mul
+ call void @use(double %mul)
+ ret double %add1
+}
+
+declare void @use(double)
diff --git a/test/CodeGen/AArch64/loh.mir b/test/CodeGen/AArch64/loh.mir
index 1d08ebdc5790..6e4bb5cfaee6 100644
--- a/test/CodeGen/AArch64/loh.mir
+++ b/test/CodeGen/AArch64/loh.mir
@@ -180,7 +180,6 @@ body: |
%x9 = ADRP target-flags(aarch64-page, aarch64-got) @g5
bb.13:
- successors: %bb.14
; Cannot produce a LOH for multiple users
; CHECK-NOT: MCLOH_AdrpAdd
%x10 = ADRP target-flags(aarch64-page) @g0
diff --git a/test/CodeGen/AArch64/machine-copy-remove.mir b/test/CodeGen/AArch64/machine-copy-remove.mir
index 6f2d3a3009b0..50c03ddb4037 100644
--- a/test/CodeGen/AArch64/machine-copy-remove.mir
+++ b/test/CodeGen/AArch64/machine-copy-remove.mir
@@ -7,20 +7,16 @@ name: test1
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
%x0 = COPY %x1
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -38,20 +34,16 @@ name: test2
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
%x1 = COPY %x0
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -69,7 +61,6 @@ name: test3
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x0 = COPY %x1
@@ -77,13 +68,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -101,7 +89,6 @@ name: test4
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x1 = COPY %x0
@@ -109,13 +96,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -133,7 +117,6 @@ name: test5
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x1 = COPY %x0
@@ -141,13 +124,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -165,7 +145,6 @@ name: test6
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x2 = COPY %x0
@@ -173,13 +152,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -197,7 +173,6 @@ name: test7
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1, %x2
%x2 = COPY %x0
@@ -206,13 +181,10 @@ body: |
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
-
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -232,14 +204,12 @@ name: test8
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
%x1 = COPY %x0
CBNZX %x1, %bb.2
bb.1:
- successors: %bb.3
liveins: %x0, %x2
%x0, %x1 = LDPXi %x2, 0
@@ -248,7 +218,6 @@ body: |
B %bb.3
bb.2:
- successors: %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -267,20 +236,17 @@ name: test9
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
CBNZX %x0, %bb.2
bb.1:
- successors: %bb.3
liveins: %x0, %x2
%x0 = COPY %xzr
B %bb.3
bb.2:
- successors: %bb.1, %bb.3
liveins: %x1
%x0 = LDRXui %x1, 0
@@ -304,7 +270,6 @@ name: test10
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -312,7 +277,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -332,7 +296,6 @@ name: test11
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
@@ -340,7 +303,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7, implicit-def %x0
@@ -360,7 +322,6 @@ name: test12
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
@@ -368,7 +329,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -388,7 +348,6 @@ name: test13
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -396,7 +355,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7, implicit-def %x0
@@ -413,7 +371,6 @@ name: test14
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1, %x2
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -423,7 +380,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -440,7 +396,6 @@ name: test15
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1, %x2
dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -448,7 +403,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1, %x2
%w0 = LDRWui %x1, 0
@@ -467,7 +421,6 @@ name: test16
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri %w0, 7, 0, implicit-def %nzcv
@@ -476,7 +429,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w2 = MOVi32imm 7
@@ -493,7 +445,6 @@ name: test17
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %w0 = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
@@ -501,7 +452,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 7
@@ -520,14 +470,12 @@ name: test18
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
CBNZX killed %x0, %bb.2
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%x0 = MOVi64imm 4252017623040
@@ -547,7 +495,6 @@ name: test19
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
@@ -555,7 +502,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm -1
@@ -575,7 +521,6 @@ name: test20
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
@@ -583,7 +528,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%x0 = MOVi64imm -1
@@ -603,7 +547,6 @@ name: test21
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %x0, %x1
dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
@@ -611,7 +554,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm -1
@@ -629,7 +571,6 @@ name: test22
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
@@ -637,7 +578,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%x0 = MOVi64imm -1
@@ -654,7 +594,6 @@ name: test23
tracksRegLiveness: true
body: |
bb.0.entry:
- successors: %bb.1, %bb.2
liveins: %w0, %x1
dead %wzr = SUBSWri killed %w0, 1, 12, implicit-def %nzcv
@@ -662,7 +601,6 @@ body: |
B %bb.1
bb.1:
- successors: %bb.2
liveins: %x1
%w0 = MOVi32imm 4096
diff --git a/test/CodeGen/AArch64/machine-sink-zr.mir b/test/CodeGen/AArch64/machine-sink-zr.mir
index 535fba0dc63b..2cf2bc488237 100644
--- a/test/CodeGen/AArch64/machine-sink-zr.mir
+++ b/test/CodeGen/AArch64/machine-sink-zr.mir
@@ -17,7 +17,6 @@ body: |
; CHECK-LABEL: bb.0:
; CHECK-NOT: COPY %wzr
bb.0:
- successors: %bb.3, %bb.1
liveins: %w0
%0 = COPY %w0
@@ -28,13 +27,9 @@ body: |
; CHECK: COPY %wzr
bb.1:
- successors: %bb.2
-
B %bb.2
bb.2:
- successors: %bb.3, %bb.2
-
%2 = PHI %0, %bb.1, %4, %bb.2
%w0 = COPY %1
%3 = SUBSWri %2, 1, 0, implicit-def dead %nzcv
diff --git a/test/CodeGen/AArch64/regcoal-physreg.mir b/test/CodeGen/AArch64/regcoal-physreg.mir
index 813106366968..f88b7482acac 100644
--- a/test/CodeGen/AArch64/regcoal-physreg.mir
+++ b/test/CodeGen/AArch64/regcoal-physreg.mir
@@ -93,7 +93,6 @@ body: |
name: func1
body: |
bb.0:
- successors: %bb.1, %bb.2
; Cannot coalesce physreg because we have reads on other CFG paths (we
; currently abort for any control flow)
; CHECK-NOT: %fp = SUBXri
@@ -117,7 +116,6 @@ body: |
name: func2
body: |
bb.0:
- successors: %bb.1, %bb.2
; We can coalesce copies from physreg to vreg across multiple blocks.
; CHECK-NOT: COPY
; CHECK: CBZX undef %x0, %bb.1
diff --git a/test/CodeGen/AArch64/xray-attribute-instrumentation.ll b/test/CodeGen/AArch64/xray-attribute-instrumentation.ll
index d0f5f40e156c..38b62a72a20f 100644
--- a/test/CodeGen/AArch64/xray-attribute-instrumentation.ll
+++ b/test/CodeGen/AArch64/xray-attribute-instrumentation.ll
@@ -26,6 +26,7 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
}
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_0
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .xword .Lxray_sled_0
diff --git a/test/CodeGen/AArch64/xray-tail-call-sled.ll b/test/CodeGen/AArch64/xray-tail-call-sled.ll
index 6ada3ce8d551..fb89950b99c8 100644
--- a/test/CodeGen/AArch64/xray-tail-call-sled.ll
+++ b/test/CodeGen/AArch64/xray-tail-call-sled.ll
@@ -29,10 +29,16 @@ define i32 @callee() nounwind noinline uwtable "function-instrument"="xray-alway
}
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_0
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .xword .Lxray_sled_0
; CHECK: .xword .Lxray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK: .section xray_fn_idx,{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .xword .Lxray_synthetic_0
+; CHECK-NEXT: .xword .Lxray_synthetic_end0
define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" {
; CHECK: .p2align 2
@@ -63,7 +69,13 @@ define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-alway
}
; CHECK: .p2align 4
; CHECK-NEXT: .xword .Lxray_synthetic_1
+; CHECK-NEXT: .xword .Lxray_fn_idx_synth_1
; CHECK-NEXT: .section xray_instr_map,{{.*}}
; CHECK-LABEL: Lxray_synthetic_1:
; CHECK: .xword .Lxray_sled_2
; CHECK: .xword .Lxray_sled_3
+; CHECK-LABEL: Lxray_synthetic_end1:
+; CHECK: .section xray_fn_idx,{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_1:
+; CHECK: .xword .Lxray_synthetic_1
+; CHECK-NEXT: .xword .Lxray_synthetic_end1
diff --git a/test/CodeGen/AMDGPU/detect-dead-lanes.mir b/test/CodeGen/AMDGPU/detect-dead-lanes.mir
index 32e6f7cc0cdc..3148b9b8ff9d 100644
--- a/test/CodeGen/AMDGPU/detect-dead-lanes.mir
+++ b/test/CodeGen/AMDGPU/detect-dead-lanes.mir
@@ -294,7 +294,6 @@ registers:
- { id: 5, class: sreg_128 }
body: |
bb.0:
- successors: %bb.1
S_NOP 0, implicit-def %0
S_NOP 0, implicit-def %1
S_NOP 0, implicit-def %2
@@ -302,7 +301,6 @@ body: |
S_BRANCH %bb.1
bb.1:
- successors: %bb.1, %bb.2
%4 = PHI %3, %bb.0, %5, %bb.1
; let's swiffle some lanes around for fun...
@@ -348,7 +346,6 @@ registers:
- { id: 6, class: sreg_128 }
body: |
bb.0:
- successors: %bb.1
S_NOP 0, implicit-def %0
S_NOP 0, implicit-def %1
S_NOP 0, implicit-def dead %2
@@ -357,7 +354,6 @@ body: |
S_BRANCH %bb.1
bb.1:
- successors: %bb.1, %bb.2
%5 = PHI %4, %bb.0, %6, %bb.1
; rotate lanes, but skip sub2 lane...
@@ -396,13 +392,11 @@ registers:
- { id: 3, class: sreg_128 }
body: |
bb.0:
- successors: %bb.1
S_NOP 0, implicit-def %0
%1 = REG_SEQUENCE %0, %subreg.sub0
S_BRANCH %bb.1
bb.1:
- successors: %bb.1, %bb.2
%2 = PHI %1, %bb.0, %3, %bb.1
; rotate subreg lanes, skipping sub1
diff --git a/test/CodeGen/AMDGPU/fmuladd.f32.ll b/test/CodeGen/AMDGPU/fmuladd.f32.ll
index fb605dd2e4bd..e42255026692 100644
--- a/test/CodeGen/AMDGPU/fmuladd.f32.ll
+++ b/test/CodeGen/AMDGPU/fmuladd.f32.ll
@@ -191,8 +191,8 @@ define amdgpu_kernel void @fadd_b_a_a_f32(float addrspace(1)* %out,
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], [[R1]], -2.0, [[R2]]
-; GCN-DENORM-SLOWFMA: v_mul_f32_e32 [[TMP:v[0-9]+]], -2.0, [[R1]]
-; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[R2]], [[TMP]]
+; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
+; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
@@ -251,8 +251,8 @@ define amdgpu_kernel void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out,
; GCN-DENORM-FASTFMA: v_fma_f32 [[RESULT:v[0-9]+]], -[[R1]], 2.0, [[R2]]
-; GCN-DENORM-SLOWFMA: v_mul_f32_e32 [[TMP:v[0-9]+]], -2.0, [[R1]]
-; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
+; GCN-DENORM-SLOWFMA: v_add_f32_e32 [[TMP:v[0-9]+]], [[R1]], [[R1]]
+; GCN-DENORM-SLOWFMA: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[TMP]], [[R2]]
; SI-DENORM: buffer_store_dword [[RESULT]]
; VI-DENORM: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
diff --git a/test/CodeGen/AMDGPU/inserted-wait-states.mir b/test/CodeGen/AMDGPU/inserted-wait-states.mir
index 1479303712d0..c6fe6debd225 100644
--- a/test/CodeGen/AMDGPU/inserted-wait-states.mir
+++ b/test/CodeGen/AMDGPU/inserted-wait-states.mir
@@ -77,19 +77,16 @@ name: div_fmas
body: |
bb.0:
- successors: %bb.1
%vcc = S_MOV_B64 0
%vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
S_BRANCH %bb.1
bb.1:
- successors: %bb.2
implicit %vcc = V_CMP_EQ_I32_e32 %vgpr1, %vgpr2, implicit %exec
%vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
S_BRANCH %bb.2
bb.2:
- successors: %bb.3
%vcc = V_CMP_EQ_I32_e64 %vgpr1, %vgpr2, implicit %exec
%vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
S_BRANCH %bb.3
@@ -130,19 +127,16 @@ name: s_getreg
body: |
bb.0:
- successors: %bb.1
S_SETREG_B32 %sgpr0, 1
%sgpr1 = S_GETREG_B32 1
S_BRANCH %bb.1
bb.1:
- successors: %bb.2
S_SETREG_IMM32_B32 0, 1
%sgpr1 = S_GETREG_B32 1
S_BRANCH %bb.2
bb.2:
- successors: %bb.3
S_SETREG_B32 %sgpr0, 1
%sgpr1 = S_MOV_B32 0
%sgpr2 = S_GETREG_B32 1
@@ -178,13 +172,11 @@ name: s_setreg
body: |
bb.0:
- successors: %bb.1
S_SETREG_B32 %sgpr0, 1
S_SETREG_B32 %sgpr1, 1
S_BRANCH %bb.1
bb.1:
- successors: %bb.2
S_SETREG_B32 %sgpr0, 64
S_SETREG_B32 %sgpr1, 128
S_BRANCH %bb.2
@@ -237,7 +229,6 @@ name: vmem_gt_8dw_store
body: |
bb.0:
- successors: %bb.1
BUFFER_STORE_DWORD_OFFSET %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
%vgpr3 = V_MOV_B32_e32 0, implicit %exec
BUFFER_STORE_DWORDX3_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
@@ -310,19 +301,16 @@ name: readwrite_lane
body: |
bb.0:
- successors: %bb.1
%vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
%sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0
S_BRANCH %bb.1
bb.1:
- successors: %bb.2
%vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
%vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0
S_BRANCH %bb.2
bb.2:
- successors: %bb.3
%vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
%sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo
S_BRANCH %bb.3
@@ -352,7 +340,6 @@ name: rfe
body: |
bb.0:
- successors: %bb.1
S_SETREG_B32 %sgpr0, 3
S_RFE_B64 %sgpr2_sgpr3
S_BRANCH %bb.1
@@ -382,7 +369,6 @@ name: s_mov_fed_b32
body: |
bb.0:
- successors: %bb.1
%sgpr0 = S_MOV_FED_B32 %sgpr0
%sgpr0 = S_MOV_B32 %sgpr0
S_BRANCH %bb.1
@@ -423,19 +409,16 @@ name: s_movrel
body: |
bb.0:
- successors: %bb.1
%m0 = S_MOV_B32 0
%sgpr0 = S_MOVRELS_B32 %sgpr0, implicit %m0
S_BRANCH %bb.1
bb.1:
- successors: %bb.2
%m0 = S_MOV_B32 0
%sgpr0_sgpr1 = S_MOVRELS_B64 %sgpr0_sgpr1, implicit %m0
S_BRANCH %bb.2
bb.2:
- successors: %bb.3
%m0 = S_MOV_B32 0
%sgpr0 = S_MOVRELD_B32 %sgpr0, implicit %m0
S_BRANCH %bb.3
@@ -475,19 +458,16 @@ name: v_interp
body: |
bb.0:
- successors: %bb.1
%m0 = S_MOV_B32 0
%vgpr0 = V_INTERP_P1_F32 %vgpr0, 0, 0, implicit %m0, implicit %exec
S_BRANCH %bb.1
bb.1:
- successors: %bb.2
%m0 = S_MOV_B32 0
%vgpr0 = V_INTERP_P2_F32 %vgpr0, %vgpr1, 0, 0, implicit %m0, implicit %exec
S_BRANCH %bb.2
bb.2:
- successors: %bb.3
%m0 = S_MOV_B32 0
%vgpr0 = V_INTERP_P1_F32_16bank %vgpr0, 0, 0, implicit %m0, implicit %exec
S_BRANCH %bb.3
diff --git a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
index bc1dafe0ea1e..67642282f75b 100644
--- a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
+++ b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
@@ -53,7 +53,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- successors: %bb.2.if, %bb.1.else
liveins: %sgpr0_sgpr1
%sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
@@ -62,7 +61,6 @@ body: |
S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc
bb.1.else:
- successors: %bb.3.done
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
%vgpr0 = V_MOV_B32_e32 100, implicit %exec
@@ -71,7 +69,6 @@ body: |
S_BRANCH %bb.3.done
bb.2.if:
- successors: %bb.3.done
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
%vgpr0 = V_MOV_B32_e32 9, implicit %exec
diff --git a/test/CodeGen/AMDGPU/lds-size.ll b/test/CodeGen/AMDGPU/lds-size.ll
index c65817abd489..ff78c3bcb18c 100644
--- a/test/CodeGen/AMDGPU/lds-size.ll
+++ b/test/CodeGen/AMDGPU/lds-size.ll
@@ -1,4 +1,5 @@
; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=ALL -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa < %s | FileCheck -check-prefix=ALL -check-prefix=HSA %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=ALL -check-prefix=EG %s
; This test makes sure we do not double count global values when they are
@@ -11,6 +12,9 @@
; EG-NEXT: .long 1
; ALL: {{^}}test:
+; HSA: granulated_lds_size = 0
+; HSA: workgroup_group_segment_byte_size = 4
+
; GCN: ; LDSByteSize: 4 bytes/workgroup (compile time only)
@lds = internal unnamed_addr addrspace(3) global i32 undef, align 4
diff --git a/test/CodeGen/AMDGPU/liveness.mir b/test/CodeGen/AMDGPU/liveness.mir
index 48762e3f2ab4..6fd8466492d0 100644
--- a/test/CodeGen/AMDGPU/liveness.mir
+++ b/test/CodeGen/AMDGPU/liveness.mir
@@ -16,13 +16,11 @@ registers:
- { id: 0, class: sreg_64 }
body: |
bb.0:
- successors: %bb.1, %bb.2
S_NOP 0, implicit-def undef %0.sub0
S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
S_BRANCH %bb.2
bb.1:
- successors: %bb.2
S_NOP 0, implicit-def %0.sub1
S_NOP 0, implicit %0.sub1
S_BRANCH %bb.2
diff --git a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll b/test/CodeGen/AMDGPU/local-stack-slot-bug.ll
deleted file mode 100644
index d3e0f0be4b5f..000000000000
--- a/test/CodeGen/AMDGPU/local-stack-slot-bug.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=verde -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s
-
-; This used to fail due to a v_add_i32 instruction with an illegal immediate
-; operand that was created during Local Stack Slot Allocation. Test case derived
-; from https://bugs.freedesktop.org/show_bug.cgi?id=96602
-;
-; CHECK-LABEL: {{^}}main:
-
-; CHECK-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x200
-; CHECK-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0x400{{$}}
-; CHECK-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0
-; CHECK-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]]
-
-; CHECK-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]]
-; CHECK-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]]
-
-; CHECK: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen
-; CHECK: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen
-define amdgpu_ps float @main(i32 %idx) {
-main_body:
- %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
- %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
- %r = fadd float %v1, %v2
- ret float %r
-}
diff --git a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index 2de6b59e59e9..b5dc9d9dac84 100644
--- a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -176,7 +176,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -189,7 +188,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
@@ -236,7 +234,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -248,7 +245,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
@@ -295,7 +291,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -307,7 +302,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
@@ -356,7 +350,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -370,7 +363,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
@@ -418,7 +410,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr6 = S_MOV_B32 -1
@@ -433,7 +424,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
%vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
@@ -480,7 +470,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -494,7 +483,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
@@ -544,7 +532,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -557,7 +544,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
S_SLEEP 0, implicit %sgpr2_sgpr3
%sgpr7 = S_MOV_B32 61440
@@ -606,7 +592,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -618,7 +603,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
@@ -665,7 +649,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -677,7 +660,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
@@ -724,7 +706,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- successors: %bb.1.if, %bb.2.end
liveins: %vgpr0
%sgpr0_sgpr1 = COPY %exec
@@ -736,7 +717,6 @@ body: |
S_BRANCH %bb.1.if
bb.1.if:
- successors: %bb.2.end
liveins: %sgpr0_sgpr1
%sgpr7 = S_MOV_B32 61440
diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs.mir b/test/CodeGen/AMDGPU/rename-independent-subregs.mir
index fc2e4426ba48..31ad26e76979 100644
--- a/test/CodeGen/AMDGPU/rename-independent-subregs.mir
+++ b/test/CodeGen/AMDGPU/rename-independent-subregs.mir
@@ -49,7 +49,6 @@ registers:
- { id: 1, class: sreg_128 }
body: |
bb.0:
- successors: %bb.1, %bb.2
S_NOP 0, implicit-def undef %0.sub2
S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
S_BRANCH %bb.2
diff --git a/test/CodeGen/AMDGPU/scratch-simple.ll b/test/CodeGen/AMDGPU/scratch-simple.ll
new file mode 100644
index 000000000000..60b9b56a48d1
--- /dev/null
+++ b/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -0,0 +1,103 @@
+; RUN: llc -march=amdgcn -mcpu=verde -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX9 %s
+
+; This used to fail due to a v_add_i32 instruction with an illegal immediate
+; operand that was created during Local Stack Slot Allocation. Test case derived
+; from https://bugs.freedesktop.org/show_bug.cgi?id=96602
+;
+; GCN-LABEL: {{^}}ps_main:
+
+; GCN-DAG: s_mov_b32 [[SWO:s[0-9]+]], s0
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x200
+; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0x400{{$}}
+; GCN-DAG: v_lshlrev_b32_e32 [[BYTES:v[0-9]+]], 2, v0
+; GCN-DAG: v_and_b32_e32 [[CLAMP_IDX:v[0-9]+]], 0x1fc, [[BYTES]]
+
+; GCN-DAG: v_or_b32_e32 [[LO_OFF:v[0-9]+]], [[CLAMP_IDX]], [[K]]
+; GCN-DAG: v_or_b32_e32 [[HI_OFF:v[0-9]+]], [[CLAMP_IDX]], [[ZERO]]
+
+; GCN: buffer_load_dword {{v[0-9]+}}, [[LO_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, [[HI_OFF]], {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+define amdgpu_ps float @ps_main(i32 %idx) {
+ %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
+ %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
+ %r = fadd float %v1, %v2
+ ret float %r
+}
+
+; GCN-LABEL: {{^}}vs_main:
+; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+define amdgpu_vs float @vs_main(i32 %idx) {
+ %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
+ %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
+ %r = fadd float %v1, %v2
+ ret float %r
+}
+
+; GCN-LABEL: {{^}}cs_main:
+; GCN: s_mov_b32 [[SWO:s[0-9]+]], s0
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+define amdgpu_cs float @cs_main(i32 %idx) {
+ %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
+ %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
+ %r = fadd float %v1, %v2
+ ret float %r
+}
+
+; GCN-LABEL: {{^}}hs_main:
+; SI: s_mov_b32 [[SWO:s[0-9]+]], s0
+; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+define amdgpu_hs float @hs_main(i32 %idx) {
+ %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
+ %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
+ %r = fadd float %v1, %v2
+ ret float %r
+}
+
+; GCN-LABEL: {{^}}gs_main:
+; SI: s_mov_b32 [[SWO:s[0-9]+]], s0
+; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+define amdgpu_gs float @gs_main(i32 %idx) {
+ %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
+ %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
+ %r = fadd float %v1, %v2
+ ret float %r
+}
+
+; GCN-LABEL: {{^}}hs_ir_uses_scratch_offset:
+; SI: s_mov_b32 [[SWO:s[0-9]+]], s6
+; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: s_mov_b32 s2, s5
+define amdgpu_hs <{i32, i32, i32, float}> @hs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) {
+ %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
+ %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
+ %f = fadd float %v1, %v2
+ %r1 = insertvalue <{i32, i32, i32, float}> undef, i32 %swo, 2
+ %r2 = insertvalue <{i32, i32, i32, float}> %r1, float %f, 3
+ ret <{i32, i32, i32, float}> %r2
+}
+
+; GCN-LABEL: {{^}}gs_ir_uses_scratch_offset:
+; SI: s_mov_b32 [[SWO:s[0-9]+]], s6
+; GFX9: s_mov_b32 [[SWO:s[0-9]+]], s5
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: buffer_load_dword {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, [[SWO]] offen
+; GCN: s_mov_b32 s2, s5
+define amdgpu_gs <{i32, i32, i32, float}> @gs_ir_uses_scratch_offset(i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg, i32 inreg %swo, i32 %idx) {
+ %v1 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0xBFEA477C60000000, float 0xBFEBE5DC60000000, float 0xBFEC71C720000000, float 0xBFEBE5DC60000000, float 0xBFEA477C60000000, float 0xBFE7A693C0000000, float 0xBFE41CFEA0000000, float 0x3FDF9B13E0000000, float 0x3FDF9B1380000000, float 0x3FD5C53B80000000, float 0x3FD5C53B00000000, float 0x3FC6326AC0000000, float 0x3FC63269E0000000, float 0xBEE05CEB00000000, float 0xBEE086A320000000, float 0xBFC63269E0000000, float 0xBFC6326AC0000000, float 0xBFD5C53B80000000, float 0xBFD5C53B80000000, float 0xBFDF9B13E0000000, float 0xBFDF9B1460000000, float 0xBFE41CFE80000000, float 0x3FE7A693C0000000, float 0x3FEA477C20000000, float 0x3FEBE5DC40000000, float 0x3FEC71C6E0000000, float 0x3FEBE5DC40000000, float 0x3FEA477C20000000, float 0x3FE7A693C0000000, float 0xBFE41CFE80000000>, i32 %idx
+ %v2 = extractelement <81 x float> <float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float undef, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFEA0000000, float 0xBFE7A693C0000000, float 0x3FE7A693C0000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFEBE5DC40000000, float 0x3FEBE5DC40000000, float 0xBFEC71C720000000, float 0x3FEC71C6E0000000, float 0xBFEBE5DC60000000, float 0x3FEBE5DC40000000, float 0xBFEA477C20000000, float 0x3FEA477C20000000, float 0xBFE7A693C0000000, float 0x3FE7A69380000000, float 0xBFE41CFEA0000000, float 0xBFDF9B13E0000000, float 0xBFD5C53B80000000, float 0xBFC6326AC0000000, float 0x3EE0789320000000, float 0x3FC6326AC0000000, float 0x3FD5C53B80000000, float 0x3FDF9B13E0000000, float 0x3FE41CFE80000000>, i32 %idx
+ %f = fadd float %v1, %v2
+ %r1 = insertvalue <{i32, i32, i32, float}> undef, i32 %swo, 2
+ %r2 = insertvalue <{i32, i32, i32, float}> %r1, float %f, 3
+ ret <{i32, i32, i32, float}> %r2
+}
diff --git a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
index 20052e865a54..18176de53793 100644
--- a/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
+++ b/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
@@ -20,12 +20,10 @@ body: |
; GCN: V_ADD_I32
bb.0:
liveins: %vgpr0
- successors: %bb.1
%7 = COPY %vgpr0
%8 = S_MOV_B32 0
bb.1:
- successors: %bb.1, %bb.2
%0 = PHI %8, %bb.0, %0, %bb.1, %2, %bb.2
%9 = V_MOV_B32_e32 9, implicit %exec
%10 = V_CMP_EQ_U32_e64 %7, %9, implicit %exec
@@ -33,7 +31,6 @@ body: |
S_BRANCH %bb.1
bb.2:
- successors: %bb.1
SI_END_CF %1, implicit-def %exec, implicit-def %scc, implicit %exec
%11 = S_MOV_B32 1
%2 = S_ADD_I32 %0, %11, implicit-def %scc
diff --git a/test/CodeGen/AMDGPU/subreg-intervals.mir b/test/CodeGen/AMDGPU/subreg-intervals.mir
index c477fe9bc6d3..62816da25b2c 100644
--- a/test/CodeGen/AMDGPU/subreg-intervals.mir
+++ b/test/CodeGen/AMDGPU/subreg-intervals.mir
@@ -31,17 +31,14 @@ registers:
- { id: 0, class: sreg_64 }
body: |
bb.0:
- successors: %bb.1, %bb.2
S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
S_BRANCH %bb.2
bb.1:
- successors: %bb.3
S_NOP 0, implicit-def undef %0.sub0
S_BRANCH %bb.3
bb.2:
- successors: %bb.3
S_NOP 0, implicit-def %0
S_BRANCH %bb.3
diff --git a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
index 5e5465800c3a..6eb937e71b1b 100644
--- a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
+++ b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
@@ -75,7 +75,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- successors: %bb.2.if, %bb.1.else
liveins: %sgpr0_sgpr1
%sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`)
@@ -86,7 +85,6 @@ body: |
S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc
bb.2.if:
- successors: %bb.3.done
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
%vgpr0 = V_MOV_B32_e32 9, implicit %exec
@@ -95,7 +93,6 @@ body: |
S_BRANCH %bb.3.done
bb.1.else:
- successors: %bb.3.done
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
%vgpr0 = V_MOV_B32_e32 100, implicit %exec
@@ -141,7 +138,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- successors: %bb.2.if, %bb.1.else
liveins: %sgpr0_sgpr1
%sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
@@ -150,7 +146,6 @@ body: |
S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc
bb.2.if:
- successors: %bb.3.done
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
%vgpr0 = V_MOV_B32_e32 9, implicit %exec
@@ -159,7 +154,6 @@ body: |
S_BRANCH %bb.3.done
bb.1.else:
- successors: %bb.3.done
liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
%vgpr0 = V_MOV_B32_e32 100, implicit %exec
diff --git a/test/CodeGen/AMDGPU/waitcnt-looptest.ll b/test/CodeGen/AMDGPU/waitcnt-looptest.ll
new file mode 100644
index 000000000000..2a3ce4dfd191
--- /dev/null
+++ b/test/CodeGen/AMDGPU/waitcnt-looptest.ll
@@ -0,0 +1,146 @@
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=fiji -mattr=-flat-for-global | FileCheck --check-prefix=GCN %s
+
+; Check that the waitcnt insertion algorithm correctly propagates wait counts
+; from before a loop to the loop header.
+
+; GCN-LABEL: {{^}}testKernel
+; GCN: BB0_1:
+; GCN: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_f32_e64
+; GCN: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_f32_e32
+; GCN: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_f32_e32
+
+@data_generic = addrspace(1) global [100 x float] [float 0.000000e+00, float 0x3FB99999A0000000, float 0x3FC99999A0000000, float 0x3FD3333340000000, float 0x3FD99999A0000000, float 5.000000e-01, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000, float 0x3FECCCCCC0000000, float 1.000000e+00, float 0x3FF19999A0000000, float 0x3FF3333340000000, float 0x3FF4CCCCC0000000, float 0x3FF6666660000000, float 1.500000e+00, float 0x3FF99999A0000000, float 0x3FFB333340000000, float 0x3FFCCCCCC0000000, float 0x3FFE666660000000, float 2.000000e+00, float 0x4000CCCCC0000000, float 0x40019999A0000000, float 0x4002666660000000, float 0x4003333340000000, float 2.500000e+00, float 0x4004CCCCC0000000, float 0x40059999A0000000, float 0x4006666660000000, float 0x4007333340000000, float 3.000000e+00, float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000, float 3.500000e+00, float 0x400CCCCCC0000000, float 0x400D9999A0000000, float 0x400E666660000000, float 0x400F333340000000, float 4.000000e+00, float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000, float 4.500000e+00, float 0x4012666660000000, float 0x4012CCCCC0000000, float 0x4013333340000000, float 0x40139999A0000000, float 5.000000e+00, float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000, float 5.500000e+00, float 0x4016666660000000, float 0x4016CCCCC0000000, float 0x4017333340000000, float 0x40179999A0000000, float 6.000000e+00, float 0x4018666660000000, float 0x4018CCCCC0000000, float 0x4019333340000000, float 0x40199999A0000000, float 6.500000e+00, float 0x401A666660000000, float 0x401ACCCCC0000000, float 0x401B333340000000, float 0x401B9999A0000000, float 7.000000e+00, float 0x401C666660000000, float 0x401CCCCCC0000000, float 0x401D333340000000, float 0x401D9999A0000000, float 7.500000e+00, float 0x401E666660000000, float 0x401ECCCCC0000000, float 
0x401F333340000000, float 0x401F9999A0000000, float 8.000000e+00, float 0x4020333340000000, float 0x4020666660000000, float 0x40209999A0000000, float 0x4020CCCCC0000000, float 8.500000e+00, float 0x4021333340000000, float 0x4021666660000000, float 0x40219999A0000000, float 0x4021CCCCC0000000, float 9.000000e+00, float 0x4022333340000000, float 0x4022666660000000, float 0x40229999A0000000, float 0x4022CCCCC0000000, float 9.500000e+00, float 0x4023333340000000, float 0x4023666660000000, float 0x40239999A0000000, float 0x4023CCCCC0000000], align 4
+@data_reference = addrspace(1) global [100 x float] [float 0.000000e+00, float 0x3FB99999A0000000, float 0x3FC99999A0000000, float 0x3FD3333340000000, float 0x3FD99999A0000000, float 5.000000e-01, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000, float 0x3FECCCCCC0000000, float 1.000000e+00, float 0x3FF19999A0000000, float 0x3FF3333340000000, float 0x3FF4CCCCC0000000, float 0x3FF6666660000000, float 1.500000e+00, float 0x3FF99999A0000000, float 0x3FFB333340000000, float 0x3FFCCCCCC0000000, float 0x3FFE666660000000, float 2.000000e+00, float 0x4000CCCCC0000000, float 0x40019999A0000000, float 0x4002666660000000, float 0x4003333340000000, float 2.500000e+00, float 0x4004CCCCC0000000, float 0x40059999A0000000, float 0x4006666660000000, float 0x4007333340000000, float 3.000000e+00, float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000, float 3.500000e+00, float 0x400CCCCCC0000000, float 0x400D9999A0000000, float 0x400E666660000000, float 0x400F333340000000, float 4.000000e+00, float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000, float 4.500000e+00, float 0x4012666660000000, float 0x4012CCCCC0000000, float 0x4013333340000000, float 0x40139999A0000000, float 5.000000e+00, float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000, float 5.500000e+00, float 0x4016666660000000, float 0x4016CCCCC0000000, float 0x4017333340000000, float 0x40179999A0000000, float 6.000000e+00, float 0x4018666660000000, float 0x4018CCCCC0000000, float 0x4019333340000000, float 0x40199999A0000000, float 6.500000e+00, float 0x401A666660000000, float 0x401ACCCCC0000000, float 0x401B333340000000, float 0x401B9999A0000000, float 7.000000e+00, float 0x401C666660000000, float 0x401CCCCCC0000000, float 0x401D333340000000, float 0x401D9999A0000000, float 7.500000e+00, float 0x401E666660000000, float 0x401ECCCCC0000000, float 
0x401F333340000000, float 0x401F9999A0000000, float 8.000000e+00, float 0x4020333340000000, float 0x4020666660000000, float 0x40209999A0000000, float 0x4020CCCCC0000000, float 8.500000e+00, float 0x4021333340000000, float 0x4021666660000000, float 0x40219999A0000000, float 0x4021CCCCC0000000, float 9.000000e+00, float 0x4022333340000000, float 0x4022666660000000, float 0x40229999A0000000, float 0x4022CCCCC0000000, float 9.500000e+00, float 0x4023333340000000, float 0x4023666660000000, float 0x40239999A0000000, float 0x4023CCCCC0000000], align 4
+
+define amdgpu_kernel void @testKernel(i32 addrspace(1)* nocapture %arg) local_unnamed_addr #0 {
+bb:
+ store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float> addrspace(4)* bitcast (float addrspace(4)* getelementptr ([100 x float], [100 x float] addrspace(4)* addrspacecast ([100 x float] addrspace(1)* @data_generic to [100 x float] addrspace(4)*), i64 0, i64 4) to <2 x float> addrspace(4)*), align 4
+ store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float> addrspace(4)* bitcast (float addrspace(4)* getelementptr ([100 x float], [100 x float] addrspace(4)* addrspacecast ([100 x float] addrspace(1)* @data_reference to [100 x float] addrspace(4)*), i64 0, i64 4) to <2 x float> addrspace(4)*), align 4
+ br label %bb18
+
+bb1: ; preds = %bb18
+ %tmp = tail call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
+ %tmp2 = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %tmp3 = tail call i32 @llvm.amdgcn.workgroup.id.x()
+ %tmp4 = getelementptr inbounds i8, i8 addrspace(2)* %tmp, i64 4
+ %tmp5 = bitcast i8 addrspace(2)* %tmp4 to i16 addrspace(2)*
+ %tmp6 = load i16, i16 addrspace(2)* %tmp5, align 4
+ %tmp7 = zext i16 %tmp6 to i32
+ %tmp8 = mul i32 %tmp3, %tmp7
+ %tmp9 = add i32 %tmp8, %tmp2
+ %tmp10 = tail call i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr()
+ %tmp11 = zext i32 %tmp9 to i64
+ %tmp12 = bitcast i8 addrspace(2)* %tmp10 to i64 addrspace(2)*
+ %tmp13 = load i64, i64 addrspace(2)* %tmp12, align 8
+ %tmp14 = add i64 %tmp13, %tmp11
+ %tmp15 = zext i1 %tmp99 to i32
+ %tmp16 = and i64 %tmp14, 4294967295
+ %tmp17 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp16
+ store i32 %tmp15, i32 addrspace(1)* %tmp17, align 4
+ ret void
+
+bb18: ; preds = %bb18, %bb
+ %tmp19 = phi i64 [ 0, %bb ], [ %tmp102, %bb18 ]
+ %tmp20 = phi i32 [ 0, %bb ], [ %tmp100, %bb18 ]
+ %tmp21 = phi i1 [ true, %bb ], [ %tmp99, %bb18 ]
+ %tmp22 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp19
+ %tmp23 = load float, float addrspace(1)* %tmp22, align 4
+ %tmp24 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp19
+ %tmp25 = load float, float addrspace(1)* %tmp24, align 4
+ %tmp26 = fcmp oeq float %tmp23, %tmp25
+ %tmp27 = and i1 %tmp21, %tmp26
+ %tmp28 = or i32 %tmp20, 1
+ %tmp29 = sext i32 %tmp28 to i64
+ %tmp30 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp29
+ %tmp31 = load float, float addrspace(1)* %tmp30, align 4
+ %tmp32 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp29
+ %tmp33 = load float, float addrspace(1)* %tmp32, align 4
+ %tmp34 = fcmp oeq float %tmp31, %tmp33
+ %tmp35 = and i1 %tmp27, %tmp34
+ %tmp36 = add nuw nsw i32 %tmp20, 2
+ %tmp37 = sext i32 %tmp36 to i64
+ %tmp38 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp37
+ %tmp39 = load float, float addrspace(1)* %tmp38, align 4
+ %tmp40 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp37
+ %tmp41 = load float, float addrspace(1)* %tmp40, align 4
+ %tmp42 = fcmp oeq float %tmp39, %tmp41
+ %tmp43 = and i1 %tmp35, %tmp42
+ %tmp44 = add nuw nsw i32 %tmp20, 3
+ %tmp45 = sext i32 %tmp44 to i64
+ %tmp46 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp45
+ %tmp47 = load float, float addrspace(1)* %tmp46, align 4
+ %tmp48 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp45
+ %tmp49 = load float, float addrspace(1)* %tmp48, align 4
+ %tmp50 = fcmp oeq float %tmp47, %tmp49
+ %tmp51 = and i1 %tmp43, %tmp50
+ %tmp52 = add nuw nsw i32 %tmp20, 4
+ %tmp53 = sext i32 %tmp52 to i64
+ %tmp54 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp53
+ %tmp55 = load float, float addrspace(1)* %tmp54, align 4
+ %tmp56 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp53
+ %tmp57 = load float, float addrspace(1)* %tmp56, align 4
+ %tmp58 = fcmp oeq float %tmp55, %tmp57
+ %tmp59 = and i1 %tmp51, %tmp58
+ %tmp60 = add nuw nsw i32 %tmp20, 5
+ %tmp61 = sext i32 %tmp60 to i64
+ %tmp62 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp61
+ %tmp63 = load float, float addrspace(1)* %tmp62, align 4
+ %tmp64 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp61
+ %tmp65 = load float, float addrspace(1)* %tmp64, align 4
+ %tmp66 = fcmp oeq float %tmp63, %tmp65
+ %tmp67 = and i1 %tmp59, %tmp66
+ %tmp68 = add nuw nsw i32 %tmp20, 6
+ %tmp69 = sext i32 %tmp68 to i64
+ %tmp70 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp69
+ %tmp71 = load float, float addrspace(1)* %tmp70, align 4
+ %tmp72 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp69
+ %tmp73 = load float, float addrspace(1)* %tmp72, align 4
+ %tmp74 = fcmp oeq float %tmp71, %tmp73
+ %tmp75 = and i1 %tmp67, %tmp74
+ %tmp76 = add nuw nsw i32 %tmp20, 7
+ %tmp77 = sext i32 %tmp76 to i64
+ %tmp78 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp77
+ %tmp79 = load float, float addrspace(1)* %tmp78, align 4
+ %tmp80 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp77
+ %tmp81 = load float, float addrspace(1)* %tmp80, align 4
+ %tmp82 = fcmp oeq float %tmp79, %tmp81
+ %tmp83 = and i1 %tmp75, %tmp82
+ %tmp84 = add nuw nsw i32 %tmp20, 8
+ %tmp85 = sext i32 %tmp84 to i64
+ %tmp86 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp85
+ %tmp87 = load float, float addrspace(1)* %tmp86, align 4
+ %tmp88 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp85
+ %tmp89 = load float, float addrspace(1)* %tmp88, align 4
+ %tmp90 = fcmp oeq float %tmp87, %tmp89
+ %tmp91 = and i1 %tmp83, %tmp90
+ %tmp92 = add nuw nsw i32 %tmp20, 9
+ %tmp93 = sext i32 %tmp92 to i64
+ %tmp94 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_generic, i64 0, i64 %tmp93
+ %tmp95 = load float, float addrspace(1)* %tmp94, align 4
+ %tmp96 = getelementptr inbounds [100 x float], [100 x float] addrspace(1)* @data_reference, i64 0, i64 %tmp93
+ %tmp97 = load float, float addrspace(1)* %tmp96, align 4
+ %tmp98 = fcmp oeq float %tmp95, %tmp97
+ %tmp99 = and i1 %tmp91, %tmp98
+ %tmp100 = add nuw nsw i32 %tmp20, 10
+ %tmp101 = icmp eq i32 %tmp100, 100
+ %tmp102 = sext i32 %tmp100 to i64
+ br i1 %tmp101, label %bb1, label %bb18
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1
+
+; Function Attrs: nounwind readnone speculatable
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+; Function Attrs: nounwind readnone speculatable
+declare i32 @llvm.amdgcn.workgroup.id.x() #1
+
+; Function Attrs: nounwind readnone speculatable
+declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #1
+
+attributes #0 = { "target-cpu"="fiji" "target-features"="-flat-for-global" }
+attributes #1 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/test/CodeGen/ARM/ARMLoadStoreDBG.mir
index 0e6f80bfb48b..cf5388ac1ccb 100644
--- a/test/CodeGen/ARM/ARMLoadStoreDBG.mir
+++ b/test/CodeGen/ARM/ARMLoadStoreDBG.mir
@@ -118,7 +118,6 @@ stack:
- { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7' }
body: |
bb.0.entry:
- successors: %bb.1, %bb.2.if.end
liveins: %r0, %r1, %r2, %r3, %lr, %r7
DBG_VALUE debug-use %r0, debug-use _, !18, !27, debug-location !28
diff --git a/test/CodeGen/ARM/acle-intrinsics-v5.ll b/test/CodeGen/ARM/acle-intrinsics-v5.ll
new file mode 100644
index 000000000000..407bea148863
--- /dev/null
+++ b/test/CodeGen/ARM/acle-intrinsics-v5.ll
@@ -0,0 +1,110 @@
+; RUN: llc -O1 -mtriple=armv5te-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=armv7-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv6t2-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv7em-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv8m.main-none-none-eabi -mattr=+dsp %s -o - | FileCheck %s
+define i32 @smulbb(i32 %a, i32 %b) {
+; CHECK-LABEL: smulbb
+; CHECK: smulbb r0, r0, r1
+ %tmp = call i32 @llvm.arm.smulbb(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smulbt(i32 %a, i32 %b) {
+; CHECK-LABEL: smulbt
+; CHECK: smulbt r0, r0, r1
+ %tmp = call i32 @llvm.arm.smulbt(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smultb(i32 %a, i32 %b) {
+; CHECK-LABEL: smultb
+; CHECK: smultb r0, r0, r1
+ %tmp = call i32 @llvm.arm.smultb(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smultt(i32 %a, i32 %b) {
+; CHECK-LABEL: smultt
+; CHECK: smultt r0, r0, r1
+ %tmp = call i32 @llvm.arm.smultt(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smulwb(i32 %a, i32 %b) {
+; CHECK-LABEL: smulwb
+; CHECK: smulwb r0, r0, r1
+ %tmp = call i32 @llvm.arm.smulwb(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smulwt(i32 %a, i32 %b) {
+; CHECK-LABEL: smulwt
+; CHECK: smulwt r0, r0, r1
+ %tmp = call i32 @llvm.arm.smulwt(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @acc_mults(i32 %a, i32 %b, i32 %acc) {
+; CHECK-LABEL: acc_mults
+; CHECK: smlabb r2, r0, r1, r2
+; CHECK: smlabt r2, r0, r1, r2
+; CHECK: smlatb r2, r0, r1, r2
+; CHECK: smlatt r2, r0, r1, r2
+; CHECK: smlawb r2, r0, r1, r2
+; CHECK: smlawt r0, r0, r1, r2
+ %acc1 = call i32 @llvm.arm.smlabb(i32 %a, i32 %b, i32 %acc)
+ %acc2 = call i32 @llvm.arm.smlabt(i32 %a, i32 %b, i32 %acc1)
+ %acc3 = call i32 @llvm.arm.smlatb(i32 %a, i32 %b, i32 %acc2)
+ %acc4 = call i32 @llvm.arm.smlatt(i32 %a, i32 %b, i32 %acc3)
+ %acc5 = call i32 @llvm.arm.smlawb(i32 %a, i32 %b, i32 %acc4)
+ %acc6 = call i32 @llvm.arm.smlawt(i32 %a, i32 %b, i32 %acc5)
+ ret i32 %acc6
+}
+
+define i32 @qadd(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qadd
+; CHECK: qadd r0, r0, r1
+ %tmp = call i32 @llvm.arm.qadd(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @qsub(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qsub
+; CHECK: qsub r0, r0, r1
+ %tmp = call i32 @llvm.arm.qsub(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @qdadd(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qdadd
+; CHECK: qdadd r0, r0, r1
+ %dbl = call i32 @llvm.arm.qadd(i32 %a, i32 %a)
+ %add = call i32 @llvm.arm.qadd(i32 %dbl, i32 %b)
+ ret i32 %add
+}
+
+define i32 @qdsub(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qdsub
+; CHECK: qdsub r0, r0, r1
+ %dbl = call i32 @llvm.arm.qadd(i32 %b, i32 %b)
+ %add = call i32 @llvm.arm.qsub(i32 %a, i32 %dbl)
+ ret i32 %add
+}
+
+declare i32 @llvm.arm.smulbb(i32 %a, i32 %b) nounwind readnone
+declare i32 @llvm.arm.smulbt(i32 %a, i32 %b) nounwind readnone
+declare i32 @llvm.arm.smultb(i32 %a, i32 %b) nounwind readnone
+declare i32 @llvm.arm.smultt(i32 %a, i32 %b) nounwind readnone
+declare i32 @llvm.arm.smulwb(i32 %a, i32 %b) nounwind readnone
+declare i32 @llvm.arm.smulwt(i32 %a, i32 %b) nounwind readnone
+declare i32 @llvm.arm.smlabb(i32, i32, i32) nounwind
+declare i32 @llvm.arm.smlabt(i32, i32, i32) nounwind
+declare i32 @llvm.arm.smlatb(i32, i32, i32) nounwind
+declare i32 @llvm.arm.smlatt(i32, i32, i32) nounwind
+declare i32 @llvm.arm.smlawb(i32, i32, i32) nounwind
+declare i32 @llvm.arm.smlawt(i32, i32, i32) nounwind
+declare i32 @llvm.arm.qadd(i32, i32) nounwind
+declare i32 @llvm.arm.qsub(i32, i32) nounwind
diff --git a/test/CodeGen/ARM/acle-intrinsics.ll b/test/CodeGen/ARM/acle-intrinsics.ll
new file mode 100644
index 000000000000..0c20744e126b
--- /dev/null
+++ b/test/CodeGen/ARM/acle-intrinsics.ll
@@ -0,0 +1,481 @@
+; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=armv7-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv6t2-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv7em-none-none-eabi %s -o - | FileCheck %s
+; RUN: llc -O1 -mtriple=thumbv8m.main-none-none-eabi -mattr=+dsp %s -o - | FileCheck %s
+
+
+; upper-bound of the immediate argument
+define i32 @ssat1(i32 %a) nounwind {
+; CHECK-LABEL: ssat1
+; CHECK: ssat r0, #32, r0
+ %tmp = call i32 @llvm.arm.ssat(i32 %a, i32 32)
+ ret i32 %tmp
+}
+
+; lower-bound of the immediate argument
+define i32 @ssat2(i32 %a) nounwind {
+; CHECK-LABEL: ssat2
+; CHECK: ssat r0, #1, r0
+ %tmp = call i32 @llvm.arm.ssat(i32 %a, i32 1)
+ ret i32 %tmp
+}
+
+; upper-bound of the immediate argument
+define i32 @usat1(i32 %a) nounwind {
+; CHECK-LABEL: usat1
+; CHECK: usat r0, #31, r0
+ %tmp = call i32 @llvm.arm.usat(i32 %a, i32 31)
+ ret i32 %tmp
+}
+
+; lower-bound of the immediate argument
+define i32 @usat2(i32 %a) nounwind {
+; CHECK-LABEL: usat2
+; CHECK: usat r0, #0, r0
+ %tmp = call i32 @llvm.arm.usat(i32 %a, i32 0)
+ ret i32 %tmp
+}
+
+define i32 @ssat16 (i32 %a) nounwind {
+; CHECK-LABEL: ssat16
+; CHECK: ssat16 r0, #1, r0
+; CHECK: ssat16 r0, #16, r0
+ %tmp = call i32 @llvm.arm.ssat16(i32 %a, i32 1)
+ %tmp2 = call i32 @llvm.arm.ssat16(i32 %tmp, i32 16)
+ ret i32 %tmp2
+}
+
+define i32 @usat16(i32 %a) nounwind {
+; CHECK-LABEL: usat16
+; CHECK: usat16 r0, #0, r0
+; CHECK: usat16 r0, #15, r0
+ %tmp = call i32 @llvm.arm.usat16(i32 %a, i32 0)
+ %tmp2 = call i32 @llvm.arm.usat16(i32 %tmp, i32 15)
+ ret i32 %tmp2
+}
+
+define i32 @pack_unpack(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: pack_unpack
+; CHECK: sxtab16 r0, r0, r1
+; CHECK: sxtb16 r0, r0
+; CHECK: uxtab16 r0, r1, r0
+; CHECK: uxtb16 r0, r0
+ %tmp = call i32 @llvm.arm.sxtab16(i32 %a, i32 %b)
+ %tmp1 = call i32 @llvm.arm.sxtb16(i32 %tmp)
+ %tmp2 = call i32 @llvm.arm.uxtab16(i32 %b, i32 %tmp1)
+ %tmp3 = call i32 @llvm.arm.uxtb16(i32 %tmp2)
+ ret i32 %tmp3
+}
+
+define i32 @sel(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sel
+; CHECK sel r0, r0, r1
+ %tmp = call i32 @llvm.arm.sel(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @qadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qadd8
+; CHECK: qadd8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.qadd8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @qsub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qsub8
+; CHECK: qsub8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.qsub8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @sadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sadd8
+; CHECK: sadd8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.sadd8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @shadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shadd8
+; CHECK: shadd8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.shadd8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @shsub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shsub8
+; CHECK: shsub8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.shsub8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @ssub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ssub8
+; CHECK: ssub8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.ssub8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uadd8
+; CHECK: uadd8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uadd8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uhadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhadd8
+; CHECK: uhadd8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uhadd8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uhsub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhsub8
+; CHECK: uhsub8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uhsub8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uqadd8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uqadd8
+; CHECK: uqadd8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uqadd8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uqsub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uqsub8
+; CHECK: uqsub8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uqsub8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @usub8(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: usub8
+; CHECK: usub8 r0, r0, r1
+ %tmp = call i32 @llvm.arm.usub8(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @usad(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: usad
+; CHECK: usad8 r0, r0, r1
+; CHECK: usada8 r0, r0, r1, r2
+ %tmp = call i32 @llvm.arm.usad8(i32 %a, i32 %b)
+ %tmp1 = call i32 @llvm.arm.usada8(i32 %tmp, i32 %b, i32 %c)
+ ret i32 %tmp1
+}
+
+define i32 @qadd16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qadd16
+; CHECK: qadd16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.qadd16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @qasx(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qasx
+; CHECK: qasx r0, r0, r1
+ %tmp = call i32 @llvm.arm.qasx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @qsax(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qsax
+; CHECK: qsax r0, r0, r1
+ %tmp = call i32 @llvm.arm.qsax(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @qsub16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: qsub16
+; CHECK: qsub16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.qsub16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @sadd16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sadd16
+; CHECK: sadd16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.sadd16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @sasx(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sasx
+; CHECK: sasx r0, r0, r1
+ %tmp = call i32 @llvm.arm.sasx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @shadd16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shadd16
+; CHECK: shadd16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.shadd16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @shasx(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shasx
+; CHECK: shasx r0, r0, r1
+ %tmp = call i32 @llvm.arm.shasx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @shsax(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shsax
+; CHECK: shsax r0, r0, r1
+ %tmp = call i32 @llvm.arm.shsax(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @shsub16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: shsub16
+; CHECK: shsub16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.shsub16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @ssax(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ssax
+; CHECK: ssax r0, r0, r1
+ %tmp = call i32 @llvm.arm.ssax(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @ssub16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ssub16
+; CHECK: ssub16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.ssub16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uadd16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uadd16
+; CHECK: uadd16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uadd16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uasx(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uasx
+; CHECK: uasx r0, r0, r1
+ %tmp = call i32 @llvm.arm.uasx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uhadd16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhadd16
+; CHECK: uhadd16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uhadd16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uhasx(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhasx
+; CHECK: uhasx r0, r0, r1
+ %tmp = call i32 @llvm.arm.uhasx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uhsax(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhsax
+; CHECK: uhsax r0, r0, r1
+ %tmp = call i32 @llvm.arm.uhsax(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uhsub16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uhsub16
+; CHECK: uhsub16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uhsub16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uqadd16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uqadd16
+; CHECK: uqadd16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uqadd16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uqasx(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uqasx
+; CHECK: uqasx r0, r0, r1
+ %tmp = call i32 @llvm.arm.uqasx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uqsax(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uqsax
+; CHECK: uqsax r0, r0, r1
+ %tmp = call i32 @llvm.arm.uqsax(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @uqsub16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uqsub16
+; CHECK: uqsub16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.uqsub16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @usax(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: usax
+; CHECK: usax r0, r0, r1
+ %tmp = call i32 @llvm.arm.usax(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @usub16(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: usub16
+; CHECK: usub16 r0, r0, r1
+ %tmp = call i32 @llvm.arm.usub16(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smlad(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: smlad
+; CHECK: smlad r0, r0, r1, r2
+ %tmp = call i32 @llvm.arm.smlad(i32 %a, i32 %b, i32 %c)
+ ret i32 %tmp
+}
+
+define i32 @smladx(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: smladx
+; CHECK: smladx r0, r0, r1, r2
+ %tmp = call i32 @llvm.arm.smladx(i32 %a, i32 %b, i32 %c)
+ ret i32 %tmp
+}
+
+define i64 @smlald(i32 %a, i32 %b, i64 %c) nounwind {
+; CHECK-LABEL: smlald
+; CHECK: smlald r2, r3, r0, r1
+ %tmp = call i64 @llvm.arm.smlald(i32 %a, i32 %b, i64 %c)
+ ret i64 %tmp
+}
+
+define i64 @smlaldx(i32 %a, i32 %b, i64 %c) nounwind {
+; CHECK-LABEL: smlaldx
+; CHECK: smlaldx r2, r3, r0, r1
+ %tmp = call i64 @llvm.arm.smlaldx(i32 %a, i32 %b, i64 %c)
+ ret i64 %tmp
+}
+
+define i32 @smlsd(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: smlsd
+; CHECK: smlsd r0, r0, r1, r2
+ %tmp = call i32 @llvm.arm.smlsd(i32 %a, i32 %b, i32 %c)
+ ret i32 %tmp
+}
+
+define i32 @smlsdx(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: smlsdx
+; CHECK: smlsdx r0, r0, r1, r2
+ %tmp = call i32 @llvm.arm.smlsdx(i32 %a, i32 %b, i32 %c)
+ ret i32 %tmp
+}
+
+define i64 @smlsld(i32 %a, i32 %b, i64 %c) nounwind {
+; CHECK-LABEL: smlsld
+; CHECK: smlsld r2, r3, r0, r1
+ %tmp = call i64 @llvm.arm.smlsld(i32 %a, i32 %b, i64 %c)
+ ret i64 %tmp
+}
+
+define i64 @smlsldx(i32 %a, i32 %b, i64 %c) nounwind {
+; CHECK-LABEL: smlsldx
+; CHECK: smlsldx r2, r3, r0, r1
+ %tmp = call i64 @llvm.arm.smlsldx(i32 %a, i32 %b, i64 %c)
+ ret i64 %tmp
+}
+
+define i32 @smuad(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: smuad
+; CHECK: smuad r0, r0, r1
+ %tmp = call i32 @llvm.arm.smuad(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smuadx(i32 %a, i32 %b) nounwind {
+;CHECK-LABEL: smuadx
+; CHECK: smuadx r0, r0, r1
+ %tmp = call i32 @llvm.arm.smuadx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smusd(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: smusd
+; CHECK: smusd r0, r0, r1
+ %tmp = call i32 @llvm.arm.smusd(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+
+define i32 @smusdx(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: smusdx
+; CHECK: smusdx r0, r0, r1
+ %tmp = call i32 @llvm.arm.smusdx(i32 %a, i32 %b)
+ ret i32 %tmp
+}
+declare i32 @llvm.arm.ssat(i32, i32) nounwind readnone
+declare i32 @llvm.arm.usat(i32, i32) nounwind readnone
+declare i32 @llvm.arm.ssat16(i32, i32) nounwind
+declare i32 @llvm.arm.usat16(i32, i32) nounwind
+declare i32 @llvm.arm.sxtab16(i32, i32)
+declare i32 @llvm.arm.sxtb16(i32)
+declare i32 @llvm.arm.uxtab16(i32, i32)
+declare i32 @llvm.arm.uxtb16(i32)
+declare i32 @llvm.arm.sel(i32, i32) nounwind
+declare i32 @llvm.arm.qadd8(i32, i32) nounwind
+declare i32 @llvm.arm.qsub8(i32, i32) nounwind
+declare i32 @llvm.arm.sadd8(i32, i32) nounwind
+declare i32 @llvm.arm.shadd8(i32, i32) nounwind
+declare i32 @llvm.arm.shsub8(i32, i32) nounwind
+declare i32 @llvm.arm.ssub8(i32, i32) nounwind
+declare i32 @llvm.arm.uadd8(i32, i32) nounwind
+declare i32 @llvm.arm.uhadd8(i32, i32) nounwind
+declare i32 @llvm.arm.uhsub8(i32, i32) nounwind
+declare i32 @llvm.arm.uqadd8(i32, i32) nounwind
+declare i32 @llvm.arm.uqsub8(i32, i32) nounwind
+declare i32 @llvm.arm.usub8(i32, i32) nounwind
+declare i32 @llvm.arm.usad8(i32, i32) nounwind readnone
+declare i32 @llvm.arm.usada8(i32, i32, i32) nounwind readnone
+declare i32 @llvm.arm.qadd16(i32, i32) nounwind
+declare i32 @llvm.arm.qasx(i32, i32) nounwind
+declare i32 @llvm.arm.qsax(i32, i32) nounwind
+declare i32 @llvm.arm.qsub16(i32, i32) nounwind
+declare i32 @llvm.arm.sadd16(i32, i32) nounwind
+declare i32 @llvm.arm.sasx(i32, i32) nounwind
+declare i32 @llvm.arm.shadd16(i32, i32) nounwind
+declare i32 @llvm.arm.shasx(i32, i32) nounwind
+declare i32 @llvm.arm.shsax(i32, i32) nounwind
+declare i32 @llvm.arm.shsub16(i32, i32) nounwind
+declare i32 @llvm.arm.ssax(i32, i32) nounwind
+declare i32 @llvm.arm.ssub16(i32, i32) nounwind
+declare i32 @llvm.arm.uadd16(i32, i32) nounwind
+declare i32 @llvm.arm.uasx(i32, i32) nounwind
+declare i32 @llvm.arm.usax(i32, i32) nounwind
+declare i32 @llvm.arm.uhadd16(i32, i32) nounwind
+declare i32 @llvm.arm.uhasx(i32, i32) nounwind
+declare i32 @llvm.arm.uhsax(i32, i32) nounwind
+declare i32 @llvm.arm.uhsub16(i32, i32) nounwind
+declare i32 @llvm.arm.uqadd16(i32, i32) nounwind
+declare i32 @llvm.arm.uqasx(i32, i32) nounwind
+declare i32 @llvm.arm.uqsax(i32, i32) nounwind
+declare i32 @llvm.arm.uqsub16(i32, i32) nounwind
+declare i32 @llvm.arm.usub16(i32, i32) nounwind
+declare i32 @llvm.arm.smlad(i32, i32, i32) nounwind
+declare i32 @llvm.arm.smladx(i32, i32, i32) nounwind
+declare i64 @llvm.arm.smlald(i32, i32, i64) nounwind
+declare i64 @llvm.arm.smlaldx(i32, i32, i64) nounwind
+declare i32 @llvm.arm.smlsd(i32, i32, i32) nounwind
+declare i32 @llvm.arm.smlsdx(i32, i32, i32) nounwind
+declare i64 @llvm.arm.smlsld(i32, i32, i64) nounwind
+declare i64 @llvm.arm.smlsldx(i32, i32, i64) nounwind
+declare i32 @llvm.arm.smuad(i32, i32) nounwind
+declare i32 @llvm.arm.smuadx(i32, i32) nounwind
+declare i32 @llvm.arm.smusd(i32, i32) nounwind
+declare i32 @llvm.arm.smusdx(i32, i32) nounwind
diff --git a/test/CodeGen/ARM/alloca-align.ll b/test/CodeGen/ARM/alloca-align.ll
new file mode 100644
index 000000000000..3bba156f0ee0
--- /dev/null
+++ b/test/CodeGen/ARM/alloca-align.ll
@@ -0,0 +1,24 @@
+; RUN: llc -o - %s | FileCheck %s
+target triple="arm--"
+
+@glob = external global i32*
+
+declare void @bar(i32*, [20000 x i8]* byval)
+
+; CHECK-LABEL: foo:
+; We should see the stack getting additional alignment
+; CHECK: sub sp, sp, #16
+; CHECK: bic sp, sp, #31
+; And a base pointer getting used.
+; CHECK: mov r6, sp
+; Which is passed to the call
+; CHECK: add [[REG:r[0-9]+]], r6, #19456
+; CHECK: add r0, [[REG]], #536
+; CHECK: bl bar
+define void @foo([20000 x i8]* %addr) {
+ %tmp = alloca [4 x i32], align 32
+ %tmp0 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 0
+ call void @bar(i32* %tmp0, [20000 x i8]* byval %addr)
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/cmp1-peephole-thumb.mir b/test/CodeGen/ARM/cmp1-peephole-thumb.mir
index 5ace58fd0658..3e87ced0ee57 100644
--- a/test/CodeGen/ARM/cmp1-peephole-thumb.mir
+++ b/test/CodeGen/ARM/cmp1-peephole-thumb.mir
@@ -55,7 +55,6 @@ frameInfo:
# CHECK-NOT: tCMPi8
body: |
bb.0.entry:
- successors: %bb.1.entry(0x40000000), %bb.2.entry(0x40000000)
liveins: %r0, %r1
%1 = COPY %r1
@@ -67,8 +66,6 @@ body: |
tBcc %bb.2.entry, 0, %cpsr
bb.1.entry:
- successors: %bb.2.entry(0x80000000)
-
bb.2.entry:
%5 = PHI %4, %bb.1.entry, %3, %bb.0.entry
diff --git a/test/CodeGen/ARM/cmp2-peephole-thumb.mir b/test/CodeGen/ARM/cmp2-peephole-thumb.mir
index 6e9ca70f1741..a31086d2113e 100644
--- a/test/CodeGen/ARM/cmp2-peephole-thumb.mir
+++ b/test/CodeGen/ARM/cmp2-peephole-thumb.mir
@@ -76,7 +76,6 @@ stack:
# CHECK-NEXT: tCMPi8
body: |
bb.0.entry:
- successors: %bb.1.if.then(0x40000000), %bb.2.if.end(0x40000000)
liveins: %r0, %r1
%1 = COPY %r1
@@ -88,15 +87,11 @@ body: |
tB %bb.1.if.then, 14, _
bb.1.if.then:
- successors: %bb.3.return(0x80000000)
-
%4, %cpsr = tMOVi8 42, 14, _
tSTRspi killed %4, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval)
tB %bb.3.return, 14, _
bb.2.if.end:
- successors: %bb.3.return(0x80000000)
-
%3, %cpsr = tMOVi8 1, 14, _
tSTRspi killed %3, %stack.0.retval, 0, 14, _ :: (store 4 into %ir.retval)
diff --git a/test/CodeGen/ARM/dbg-range-extension.mir b/test/CodeGen/ARM/dbg-range-extension.mir
index 466f69396948..a79607705c1c 100644
--- a/test/CodeGen/ARM/dbg-range-extension.mir
+++ b/test/CodeGen/ARM/dbg-range-extension.mir
@@ -209,7 +209,6 @@ stack:
- { id: 5, type: spill-slot, offset: -24, size: 4, alignment: 4, callee-saved-register: '%r4' }
body: |
bb.0.entry:
- successors: %bb.5.if.end, %bb.1.if.then
liveins: %r0, %r4, %r5, %r6, %r7, %r11, %lr
%sp = frame-setup STMDB_UPD %sp, 14, _, killed %r4, killed %r5, killed %r6, killed %r7, killed %r11, killed %lr
@@ -232,7 +231,6 @@ body: |
Bcc %bb.5.if.end, 0, killed %cpsr
bb.1.if.then:
- successors: %bb.3.for.cond
liveins: %r4, %r5
%r0 = MOVi 12, 14, _, _, debug-location !26
@@ -245,7 +243,6 @@ body: |
B %bb.3.for.cond
bb.2.for.body:
- successors: %bb.3.for.cond
liveins: %r4, %r5, %r6, %r7
%r1 = ADDrr %r5, %r7, 14, _, _, debug-location !36
@@ -255,7 +252,6 @@ body: |
DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28
bb.3.for.cond:
- successors: %bb.2.for.body, %bb.4.for.cond.cleanup
liveins: %r4, %r5, %r6, %r7
DBG_VALUE debug-use %r7, debug-use _, !18, !20, debug-location !28
@@ -263,7 +259,6 @@ body: |
Bcc %bb.2.for.body, 11, killed %cpsr, debug-location !33
bb.4.for.cond.cleanup:
- successors: %bb.5.if.end
liveins: %r4, %r5, %r6
%r0 = MOVr %r5, 14, _, _, debug-location !34
diff --git a/test/CodeGen/ARM/sat-arith.ll b/test/CodeGen/ARM/sat-arith.ll
deleted file mode 100644
index 4844ed1bd21e..000000000000
--- a/test/CodeGen/ARM/sat-arith.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc -O1 -mtriple=armv6-none-none-eabi %s -o - | FileCheck %s -check-prefix=ARM -check-prefix=CHECK
-; RUN: llc -O1 -mtriple=thumbv7-none-none-eabi %s -o - | FileCheck %s -check-prefix=THUMB -check-prefix=CHECK
-
-; CHECK-LABEL: qadd
-define i32 @qadd() nounwind {
-; CHECK-DAG: mov{{s?}} [[R0:.*]], #8
-; CHECK-DAG: mov{{s?}} [[R1:.*]], #128
-; CHECK-ARM: qadd [[R0]], [[R1]], [[R0]]
-; CHECK-THRUMB: qadd [[R0]], [[R0]], [[R1]]
- %tmp = call i32 @llvm.arm.qadd(i32 128, i32 8)
- ret i32 %tmp
-}
-
-; CHECK-LABEL: qsub
-define i32 @qsub() nounwind {
-; CHECK-DAG: mov{{s?}} [[R0:.*]], #8
-; CHECK-DAG: mov{{s?}} [[R1:.*]], #128
-; CHECK-ARM: qsub [[R0]], [[R1]], [[R0]]
-; CHECK-THRUMB: qadd [[R0]], [[R1]], [[R0]]
- %tmp = call i32 @llvm.arm.qsub(i32 128, i32 8)
- ret i32 %tmp
-}
-
-; upper-bound of the immediate argument
-; CHECK-LABEL: ssat1
-define i32 @ssat1() nounwind {
-; CHECK: mov{{s?}} [[R0:.*]], #128
-; CHECK: ssat [[R1:.*]], #32, [[R0]]
- %tmp = call i32 @llvm.arm.ssat(i32 128, i32 32)
- ret i32 %tmp
-}
-
-; lower-bound of the immediate argument
-; CHECK-LABEL: ssat2
-define i32 @ssat2() nounwind {
-; CHECK: mov{{s?}} [[R0:.*]], #128
-; CHECK: ssat [[R1:.*]], #1, [[R0]]
- %tmp = call i32 @llvm.arm.ssat(i32 128, i32 1)
- ret i32 %tmp
-}
-
-; upper-bound of the immediate argument
-; CHECK-LABEL: usat1
-define i32 @usat1() nounwind {
-; CHECK: mov{{s?}} [[R0:.*]], #128
-; CHECK: usat [[R1:.*]], #31, [[R0]]
- %tmp = call i32 @llvm.arm.usat(i32 128, i32 31)
- ret i32 %tmp
-}
-
-; lower-bound of the immediate argument
-; CHECK-LABEL: usat2
-define i32 @usat2() nounwind {
-; CHECK: mov{{s?}} [[R0:.*]], #128
-; CHECK: usat [[R1:.*]], #0, [[R0]]
- %tmp = call i32 @llvm.arm.usat(i32 128, i32 0)
- ret i32 %tmp
-}
-
-declare i32 @llvm.arm.qadd(i32, i32) nounwind
-declare i32 @llvm.arm.qsub(i32, i32) nounwind
-declare i32 @llvm.arm.ssat(i32, i32) nounwind readnone
-declare i32 @llvm.arm.usat(i32, i32) nounwind readnone
diff --git a/test/CodeGen/ARM/vabs.ll b/test/CodeGen/ARM/vabs.ll
index 38c6d6c28aed..4295b32d25fc 100644
--- a/test/CodeGen/ARM/vabs.ll
+++ b/test/CodeGen/ARM/vabs.ll
@@ -8,6 +8,22 @@ define <8 x i8> @vabss8(<8 x i8>* %A) nounwind {
ret <8 x i8> %tmp2
}
+define <8 x i8> @vabss8_fold(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: vabss8_fold:
+; CHECK: vldr d16, .LCPI1_0
+; CHECK: .LCPI1_0:
+; CHECK-NEXT: .byte 128 @ 0x80
+; CHECK-NEXT: .byte 127 @ 0x7f
+; CHECK-NEXT: .byte 1 @ 0x1
+; CHECK-NEXT: .byte 0 @ 0x0
+; CHECK-NEXT: .byte 1 @ 0x1
+; CHECK-NEXT: .byte 127 @ 0x7f
+; CHECK-NEXT: .byte 128 @ 0x80
+; CHECK-NEXT: .byte 1 @ 0x1
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> <i8 -128, i8 -127, i8 -1, i8 0, i8 1, i8 127, i8 128, i8 255>)
+ ret <8 x i8> %tmp1
+}
+
define <4 x i16> @vabss16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vabss16:
;CHECK: vabs.s16
@@ -16,6 +32,18 @@ define <4 x i16> @vabss16(<4 x i16>* %A) nounwind {
ret <4 x i16> %tmp2
}
+define <4 x i16> @vabss16_fold() nounwind {
+; CHECK-LABEL: vabss16_fold:
+; CHECK: vldr d16, .LCPI3_0
+; CHECK: .LCPI3_0:
+; CHECK-NEXT: .short 32768 @ 0x8000
+; CHECK-NEXT: .short 32767 @ 0x7fff
+; CHECK-NEXT: .short 255 @ 0xff
+; CHECK-NEXT: .short 32768 @ 0x8000
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> <i16 -32768, i16 -32767, i16 255, i16 32768>)
+ ret <4 x i16> %tmp1
+}
+
define <2 x i32> @vabss32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vabss32:
;CHECK: vabs.s32
@@ -24,6 +52,16 @@ define <2 x i32> @vabss32(<2 x i32>* %A) nounwind {
ret <2 x i32> %tmp2
}
+define <2 x i32> @vabss32_fold() nounwind {
+; CHECK-LABEL: vabss32_fold:
+; CHECK: vldr d16, .LCPI5_0
+; CHECK: .LCPI5_0:
+; CHECK-NEXT: .long 2147483647 @ 0x7fffffff
+; CHECK-NEXT: .long 2147483648 @ 0x80000000
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> <i32 -2147483647, i32 2147483648>)
+ ret <2 x i32> %tmp1
+}
+
define <2 x float> @vabsf32(<2 x float>* %A) nounwind {
;CHECK-LABEL: vabsf32:
;CHECK: vabs.f32
diff --git a/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll b/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll
index 93c3cb14fb73..5e3c45c3454d 100644
--- a/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll
+++ b/test/CodeGen/ARM/xray-armv6-attribute-instrumentation.ll
@@ -25,7 +25,13 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
}
; CHECK: .p2align 4
; CHECK-NEXT: .long {{.*}}Lxray_synthetic_0
+; CHECK-NEXT: .long {{.*}}Lxray_fn_idx_synth_0
; CHECK-NEXT: .section {{.*}}xray_instr_map{{.*}}
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .long {{.*}}Lxray_sled_0
; CHECK: .long {{.*}}Lxray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK: .section {{.*}}xray_fn_idx{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .long {{.*}}Lxray_synthetic_0
+; CHECK-NEXT: .long {{.*}}Lxray_synthetic_end0
diff --git a/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll b/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll
index d14590b88679..739151fbdd5e 100644
--- a/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll
+++ b/test/CodeGen/ARM/xray-armv7-attribute-instrumentation.ll
@@ -25,7 +25,14 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
}
; CHECK: .p2align 4
; CHECK-NEXT: .long {{.*}}Lxray_synthetic_0
+; CHECK-NEXT: .long {{.*}}Lxray_fn_idx_synth_0
; CHECK-NEXT: .section {{.*}}xray_instr_map{{.*}}
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .long {{.*}}Lxray_sled_0
; CHECK: .long {{.*}}Lxray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK: .section {{.*}}xray_fn_idx{{.*}}
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .long {{.*}}xray_synthetic_0
+; CHECK-NEXT: .long {{.*}}xray_synthetic_end0
+
diff --git a/test/CodeGen/BPF/dwarfdump.ll b/test/CodeGen/BPF/dwarfdump.ll
index 7ae64dfb5682..6a6913011e64 100644
--- a/test/CodeGen/BPF/dwarfdump.ll
+++ b/test/CodeGen/BPF/dwarfdump.ll
@@ -1,5 +1,7 @@
; RUN: llc -O2 -march=bpfel %s -o %t -filetype=obj
; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s
+; RUN: llc -O2 -march=bpfeb %s -o %t -filetype=obj
+; RUN: llvm-dwarfdump -debug-dump=line %t | FileCheck %s
source_filename = "testprog.c"
target datalayout = "e-m:e-p:64:64-i64:64-n32:64-S128"
diff --git a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
new file mode 100644
index 000000000000..a746d826265b
--- /dev/null
+++ b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
@@ -0,0 +1,59 @@
+# RUN: llc -march=hexagon -run-pass branch-folder -run-pass if-converter -verify-machineinstrs %s -o - | FileCheck %s
+
+# The hoisting of common instructions from successors could cause registers
+# to no longer be live-in in the successor blocks. The liveness was updated
+# to include potential new live-in registres, but not to remove registers
+# that were no longer live-in.
+# This could cause if-converter to generate incorrect code.
+#
+# In this testcase, the "r1 = A2_sxth r0<kill>" was hoisted, and since r0
+# was killed, it was no longer live-in in either successor. The if-converter
+# then created code, where the first predicated instruction has incorrect
+# implicit use of r0:
+#
+# BB#0:
+# Live Ins: %R0
+# %R1<def> = A2_sxth %R0<kill> ; hoisted, kills r0
+# A2_nop %P0<imp-def>
+# %R0<def> = C2_cmoveit %P0, 2, %R0<imp-use> ; predicated A2_tfrsi
+# %R0<def> = C2_cmoveif %P0, 1, %R0<imp-use> ; predicated A2_tfrsi
+# %R0<def> = A2_add %R0<kill>, %R1<kill>
+# J2_jumpr %R31, %PC<imp-def,dead>
+#
+
+# CHECK: %r1 = A2_sxth killed %r0
+# CHECK: %r0 = C2_cmoveit %p0, 2
+# CHECK-NOT: implicit-def %r0
+# CHECK: %r0 = C2_cmoveif %p0, 1, implicit %r0
+
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: %r0
+ successors: %bb.1, %bb.2
+
+ A2_nop implicit-def %p0
+ J2_jumpt killed %p0, %bb.2, implicit-def dead %pc
+
+ bb.1:
+ successors: %bb.3
+ liveins: %r0
+ %r1 = A2_sxth killed %r0
+ %r0 = A2_tfrsi 1
+ J2_jump %bb.3, implicit-def %pc
+
+ bb.2:
+ successors: %bb.3
+ liveins: %r0
+ %r1 = A2_sxth killed %r0
+ %r0 = A2_tfrsi 2
+
+ bb.3:
+ liveins: %r0, %r1
+ %r0 = A2_add killed %r0, killed %r1
+ J2_jumpr %r31, implicit-def dead %pc
+...
+
diff --git a/test/CodeGen/Hexagon/rdf-cover-use.ll b/test/CodeGen/Hexagon/rdf-cover-use.ll
new file mode 100644
index 000000000000..4f3de0868aa6
--- /dev/null
+++ b/test/CodeGen/Hexagon/rdf-cover-use.ll
@@ -0,0 +1,38 @@
+; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+
+; Check for sane output.
+; CHECK: vmpyweh
+
+target triple = "hexagon"
+
+declare i32 @llvm.hexagon.S2.clb(i32) #0
+declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32) #0
+declare i32 @llvm.hexagon.S2.vrndpackwh(i64) #0
+declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64) #0
+
+define i64 @fred(i32 %a0, i32 %a1) local_unnamed_addr #1 {
+b2:
+ br i1 undef, label %b15, label %b3
+
+b3: ; preds = %b2
+ %v4 = tail call i32 @llvm.hexagon.S2.clb(i32 %a1) #0
+ %v5 = add nsw i32 %v4, -32
+ %v6 = zext i32 %v5 to i64
+ %v7 = shl nuw i64 %v6, 32
+ %v8 = or i64 %v7, 0
+ %v9 = tail call i32 @llvm.hexagon.S2.asl.r.r(i32 %a0, i32 0)
+ %v10 = tail call i32 @llvm.hexagon.S2.vrndpackwh(i64 %v8)
+ %v11 = sext i32 %v9 to i64
+ %v12 = sext i32 %v10 to i64
+ %v13 = tail call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %v11, i64 %v12)
+ %v14 = and i64 %v13, 4294967295
+ br label %b15
+
+b15: ; preds = %b3, %b2
+ %v16 = phi i64 [ %v14, %b3 ], [ 0, %b2 ]
+ %v17 = or i64 0, %v16
+ ret i64 %v17
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv55" }
diff --git a/test/CodeGen/Hexagon/swp-matmul-bitext.ll b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
index 9c425ae6a098..3b26d141238a 100644
--- a/test/CodeGen/Hexagon/swp-matmul-bitext.ll
+++ b/test/CodeGen/Hexagon/swp-matmul-bitext.ll
@@ -1,17 +1,16 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-bsb-sched=0 -enable-pipeliner < %s | FileCheck %s
-; RUN: llc -march=hexagon -mcpu=hexagonv5 -enable-pipeliner < %s | FileCheck %s
+; RUN: llc -march=hexagon -mcpu=hexagonv60 -enable-pipeliner < %s | FileCheck %s
; From coremark. Test that we pipeline the matrix multiplication bitextract
; function. The pipelined code should have two packets.
; CHECK: loop0(.LBB0_[[LOOP:.]],
; CHECK: .LBB0_[[LOOP]]:
-; CHECK: = extractu([[REG2:(r[0-9]+)]],
-; CHECK: = extractu([[REG2]],
-; CHECK: [[REG0:(r[0-9]+)]] = memh
-; CHECK: [[REG1:(r[0-9]+)]] = memh
+; CHECK: [[REG0:(r[0-9]+)]] = mpyi([[REG1:(r[0-9]+)]],[[REG2:(r[0-9]+)]])
; CHECK: += mpyi
-; CHECK: [[REG2]] = mpyi([[REG0]],[[REG1]])
+; CHECK: [[REG1:(r[0-9]+)]] = memh
+; CHECK: = extractu([[REG0:(r[0-9]+)]],
+; CHECK: = extractu([[REG0]],
+; CHECK: [[REG2:(r[0-9]+)]] = memh
; CHECK: endloop0
%union_h2_sem_t = type { i32 }
diff --git a/test/CodeGen/MIR/Generic/branch-probabilities.ll b/test/CodeGen/MIR/Generic/branch-probabilities.ll
deleted file mode 100644
index 8d119316b134..000000000000
--- a/test/CodeGen/MIR/Generic/branch-probabilities.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc -stop-after machine-sink %s -o %t.mir
-; RUN: FileCheck %s < %t.mir
-; RUN: llc %t.mir -run-pass machine-sink
-; Check that branch probabilities are printed in a format that can then be parsed.
-; This test fails on powerpc because of an undefined physical register use in the MIR. See PR31062.
-; XFAIL: powerpc
-
-declare void @foo()
-declare void @bar()
-
-define void @test(i1 %c) {
-; CHECK-LABEL: name: test
-entry:
- br i1 %c, label %then, label %else
-
-then:
- call void @foo()
- br label %end
-; CHECK: successors: %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}}), %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}})
-
-else:
- call void @bar()
- br label %end
-; CHECK: successors: %{{[a-z0-9\-\.]+}}({{0x[0-9a-f]+}})
-
-end:
- ret void
-}
diff --git a/test/CodeGen/MIR/X86/auto-successor.mir b/test/CodeGen/MIR/X86/auto-successor.mir
new file mode 100644
index 000000000000..23b4f91b3b60
--- /dev/null
+++ b/test/CodeGen/MIR/X86/auto-successor.mir
@@ -0,0 +1,61 @@
+# RUN: llc -mtriple=x86_64-- -o - %s -run-pass=none -verify-machineinstrs -simplify-mir | FileCheck %s
+---
+# We shouldn't need any explicit successor lists in these examples
+# CHECK-LABEL: name: func0
+# CHECK: bb.0:
+# CHECK-NOT: successors
+# CHECK: JE_1 %bb.1, implicit undef %eflags
+# CHECK: JMP_1 %bb.3
+# CHECK: bb.1:
+# CHECK-NOT: successors
+# CHECK: bb.2:
+# CHECK-NOT: successors
+# CHECK: JE_1 %bb.1, implicit undef %eflags
+# CHECK: bb.3:
+# CHECK: RETQ undef %eax
+name: func0
+body: |
+ bb.0:
+ JE_1 %bb.1, implicit undef %eflags
+ JMP_1 %bb.3
+
+ bb.1:
+
+ bb.2:
+ JE_1 %bb.1, implicit undef %eflags
+
+ bb.3:
+ JE_1 %bb.4, implicit undef %eflags ; condjump+fallthrough to same block
+
+ bb.4:
+ RETQ undef %eax
+...
+---
+# Some cases that need explicit successors:
+# CHECK-LABEL: name: func1
+name: func1
+body: |
+ bb.0:
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.3, %bb.1
+ successors: %bb.3, %bb.1 ; different order than operands
+ JE_1 %bb.1, implicit undef %eflags
+ JMP_1 %bb.3
+
+ bb.1:
+ ; CHECK: bb.1:
+ ; CHECK: successors: %bb.2, %bb.1
+ successors: %bb.2, %bb.1 ; different order (fallthrough variant)
+ JE_1 %bb.1, implicit undef %eflags
+
+ bb.2:
+ ; CHECK: bb.2:
+ ; CHECK: successors: %bb.1(0x60000000), %bb.3(0x20000000)
+ successors: %bb.1(3), %bb.3(1) ; branch probabilities not normalized
+ JE_1 %bb.1, implicit undef %eflags
+
+ bb.3:
+ ; CHECK: bb.3:
+ ; CHECK: RETQ undef %eax
+ RETQ undef %eax
+...
diff --git a/test/CodeGen/MIR/X86/branch-probabilities.mir b/test/CodeGen/MIR/X86/branch-probabilities.mir
new file mode 100644
index 000000000000..4aacd2d5cef1
--- /dev/null
+++ b/test/CodeGen/MIR/X86/branch-probabilities.mir
@@ -0,0 +1,18 @@
+# RUN: llc -o - %s -mtriple=x86_64-- -run-pass=none | FileCheck %s
+---
+# Check that branch probabilities are printed correctly as hex numbers.
+# CHECK-LABEL: name: test
+# CHECK: bb.0:
+# CHECK-NEXT: successors: %bb.1(0x66666666), %bb.2(0x1999999a)
+name: test
+body: |
+ bb.0:
+ successors: %bb.1(4), %bb.2(1)
+ JE_1 %bb.2, implicit undef %eflags
+
+ bb.1:
+ NOOP
+
+ bb.2:
+ RETQ undef %eax
+...
diff --git a/test/CodeGen/MIR/X86/successor-basic-blocks.mir b/test/CodeGen/MIR/X86/successor-basic-blocks.mir
index 395272bb23c0..ffeb04af9e40 100644
--- a/test/CodeGen/MIR/X86/successor-basic-blocks.mir
+++ b/test/CodeGen/MIR/X86/successor-basic-blocks.mir
@@ -32,7 +32,6 @@
name: foo
body: |
; CHECK-LABEL: bb.0.entry:
- ; CHECK: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000)
; CHECK-LABEL: bb.1.less:
bb.0.entry:
successors: %bb.1.less, %bb.2.exit
diff --git a/test/CodeGen/PowerPC/restore-r30.ll b/test/CodeGen/PowerPC/restore-r30.ll
new file mode 100644
index 000000000000..216d5a709340
--- /dev/null
+++ b/test/CodeGen/PowerPC/restore-r30.ll
@@ -0,0 +1,30 @@
+; RUN: llc -march=ppc32 -relocation-model=pic < %s | FileCheck %s
+
+; The load restoring r30 at the end of the function was placed out of order
+; relative to its uses as the PIC base pointer.
+; This was because the r30 operand was not marked as "def" which allowed
+; the post-RA scheduler to move it over other uses of r30.
+
+; CHECK-LABEL: fred
+; CHECK: lwz 30, 24(1)
+; R30 should not appear in an instruction after it's been restored.
+; CHECK-NOT: 30,
+
+target datalayout = "E-m:e-p:32:32-i64:64-n32"
+target triple = "powerpc"
+
+define double @fred(i64 %a) #0 {
+entry:
+ %0 = lshr i64 %a, 32
+ %conv = trunc i64 %0 to i32
+ %conv1 = sitofp i32 %conv to double
+ %mul = fmul double %conv1, 0x41F0000000000000
+ %and = and i64 %a, 4294967295
+ %or = or i64 %and, 4841369599423283200
+ %sub = fadd double %mul, 0xC330000000000000
+ %1 = bitcast i64 %or to double
+ %add = fadd double %sub, %1
+ ret double %add
+}
+
+attributes #0 = { norecurse nounwind readnone "target-cpu"="ppc" "use-soft-float"="false" }
diff --git a/test/CodeGen/SystemZ/copy-physreg-128.ll b/test/CodeGen/SystemZ/copy-physreg-128.ll
new file mode 100644
index 000000000000..408316140605
--- /dev/null
+++ b/test/CodeGen/SystemZ/copy-physreg-128.ll
@@ -0,0 +1,68 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -join-liveintervals=false -verify-machineinstrs | FileCheck %s
+;
+; Check that copyPhysReg() properly adds impl-use operands of the super
+; register while lowering a COPY of a GR128 bit reg.
+
+define void @autogen_SD5585(i32*, i64) {
+; CHECK: .text
+BB:
+ %L5 = load i1, i1* undef
+ %I8 = insertelement <8 x i64> undef, i64 %1, i32 3
+ %I21 = insertelement <8 x i64> zeroinitializer, i64 475435, i32 5
+ br label %CF290
+
+CF290: ; preds = %CF290, %BB
+ %B29 = urem <8 x i64> %I8, %I21
+ %Cmp31 = icmp sge i1 undef, undef
+ br i1 %Cmp31, label %CF290, label %CF296
+
+CF296: ; preds = %CF290
+ %FC36 = sitofp <8 x i64> %B29 to <8 x double>
+ br label %CF302
+
+CF302: ; preds = %CF307, %CF296
+ %Shuff49 = shufflevector <8 x i64> undef, <8 x i64> zeroinitializer, <8 x i32> <i32 undef, i32 9, i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5>
+ %L69 = load i16, i16* undef
+ br label %CF307
+
+CF307: ; preds = %CF302
+ %Cmp84 = icmp ne i16 undef, %L69
+ br i1 %Cmp84, label %CF302, label %CF301
+
+CF301: ; preds = %CF307
+ %B126 = or i32 514315, undef
+ br label %CF280
+
+CF280: ; preds = %CF280, %CF301
+ %I139 = insertelement <8 x i64> %Shuff49, i64 undef, i32 2
+ %B155 = udiv <8 x i64> %I8, %I139
+ %Cmp157 = icmp ne i64 -1, undef
+ br i1 %Cmp157, label %CF280, label %CF281
+
+CF281: ; preds = %CF280
+ %Cmp164 = icmp slt i1 %L5, %Cmp84
+ br label %CF282
+
+CF282: ; preds = %CF304, %CF281
+ br label %CF289
+
+CF289: ; preds = %CF289, %CF282
+ store i32 %B126, i32* %0
+ %Cmp219 = icmp slt i64 undef, undef
+ br i1 %Cmp219, label %CF289, label %CF304
+
+CF304: ; preds = %CF289
+ %Cmp234 = icmp ult i64 0, undef
+ br i1 %Cmp234, label %CF282, label %CF283
+
+CF283: ; preds = %CF308, %CF283, %CF304
+ %E251 = extractelement <8 x i64> %B155, i32 0
+ br i1 undef, label %CF283, label %CF308
+
+CF308: ; preds = %CF283
+ store i1 %Cmp164, i1* undef
+ br i1 undef, label %CF283, label %CF293
+
+CF293: ; preds = %CF308
+ ret void
+}
diff --git a/test/CodeGen/X86/2014-08-29-CompactUnwind.ll b/test/CodeGen/X86/2014-08-29-CompactUnwind.ll
index e7e8bb724fc0..f6d6bd3ed6f7 100644
--- a/test/CodeGen/X86/2014-08-29-CompactUnwind.ll
+++ b/test/CodeGen/X86/2014-08-29-CompactUnwind.ll
@@ -24,7 +24,7 @@ target triple = "x86_64-apple-macosx10.9.0"
; CHECK-NOT: {{compact encoding:.*0x0309f800}}
; CHECK: {{compact encoding:.*0x030df800}}
-define void @__asan_report_error() #0 {
+define void @__asan_report_error(i64 %step) #0 {
%str.i = alloca i64, align 8
%stack = alloca [256 x i64], align 8
br label %print_shadow_bytes.exit.i
@@ -37,7 +37,7 @@ print_shadow_bytes.exit.i: ; preds = %print_shadow_bytes.exit.i, %0
%reg17 = shl i64 %iv.i, 1
%reg19 = inttoptr i64 %reg17 to i8*
call void (i64*, i8*, ...) @append(i64* %str.i, i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str2, i64 0, i64 0), i8* %reg16, i8* %reg19)
- %iv.next.i = add nsw i64 %iv.i, 0
+ %iv.next.i = add nsw i64 %iv.i, %step
br label %print_shadow_bytes.exit.i
}
diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll
new file mode 100644
index 000000000000..bc5b0152b24a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/gep.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL
+; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
+; X64_GISEL-LABEL: test_gep_i8:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $4, %rax
+; X64_GISEL-NEXT: movsbq %sil, %rcx
+; X64_GISEL-NEXT: imulq %rax, %rcx
+; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i8:
+; X64: # BB#0:
+; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: movsbq %sil, %rax
+; X64-NEXT: leaq (%rdi,%rax,4), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i8 %ind
+ ret i32* %arrayidx
+}
+
+define i32* @test_gep_i8_const(i32 *%arr) {
+; X64_GISEL-LABEL: test_gep_i8_const:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $80, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i8_const:
+; X64: # BB#0:
+; X64-NEXT: leaq 80(%rdi), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i8 20
+ ret i32* %arrayidx
+}
+
+define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
+; X64_GISEL-LABEL: test_gep_i16:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $4, %rax
+; X64_GISEL-NEXT: movswq %si, %rcx
+; X64_GISEL-NEXT: imulq %rax, %rcx
+; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i16:
+; X64: # BB#0:
+; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: movswq %si, %rax
+; X64-NEXT: leaq (%rdi,%rax,4), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i16 %ind
+ ret i32* %arrayidx
+}
+
+define i32* @test_gep_i16_const(i32 *%arr) {
+; X64_GISEL-LABEL: test_gep_i16_const:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $80, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i16_const:
+; X64: # BB#0:
+; X64-NEXT: leaq 80(%rdi), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i16 20
+ ret i32* %arrayidx
+}
+
+define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
+; X64_GISEL-LABEL: test_gep_i32:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $4, %rax
+; X64_GISEL-NEXT: movslq %esi, %rcx
+; X64_GISEL-NEXT: imulq %rax, %rcx
+; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i32:
+; X64: # BB#0:
+; X64-NEXT: movslq %esi, %rax
+; X64-NEXT: leaq (%rdi,%rax,4), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i32 %ind
+ ret i32* %arrayidx
+}
+
+define i32* @test_gep_i32_const(i32 *%arr) {
+; X64_GISEL-LABEL: test_gep_i32_const:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $20, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i32_const:
+; X64: # BB#0:
+; X64-NEXT: leaq 20(%rdi), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i32 5
+ ret i32* %arrayidx
+}
+
+define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
+; X64_GISEL-LABEL: test_gep_i64:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $4, %rax
+; X64_GISEL-NEXT: imulq %rsi, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i64:
+; X64: # BB#0:
+; X64-NEXT: leaq (%rdi,%rsi,4), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i64 %ind
+ ret i32* %arrayidx
+}
+
+define i32* @test_gep_i64_const(i32 *%arr) {
+; X64_GISEL-LABEL: test_gep_i64_const:
+; X64_GISEL: # BB#0:
+; X64_GISEL-NEXT: movq $20, %rax
+; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: retq
+;
+; X64-LABEL: test_gep_i64_const:
+; X64: # BB#0:
+; X64-NEXT: leaq 20(%rdi), %rax
+; X64-NEXT: retq
+ %arrayidx = getelementptr i32, i32* %arr, i64 5
+ ret i32* %arrayidx
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/legalize-gep.mir b/test/CodeGen/X86/GlobalISel/legalize-gep.mir
new file mode 100644
index 000000000000..4fdb9b910ad7
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-gep.mir
@@ -0,0 +1,101 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define void @test_gep_i8() {
+ %arrayidx = getelementptr i32, i32* undef, i8 5
+ ret void
+ }
+
+ define void @test_gep_i16() {
+ %arrayidx = getelementptr i32, i32* undef, i16 5
+ ret void
+ }
+
+ define void @test_gep_i32() {
+ %arrayidx = getelementptr i32, i32* undef, i32 5
+ ret void
+ }
+
+ define void @test_gep_i64() {
+ %arrayidx = getelementptr i32, i32* undef, i64 5
+ ret void
+ }
+...
+---
+name: test_gep_i8
+# CHECK-LABEL: name: test_gep_i8
+legalized: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: %0(p0) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s8) = G_CONSTANT i8 20
+# CHECK-NEXT: %3(s32) = G_SEXT %1(s8)
+# CHECK-NEXT: %2(p0) = G_GEP %0, %3(s32)
+# CHECK-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ %0(p0) = IMPLICIT_DEF
+ %1(s8) = G_CONSTANT i8 20
+ %2(p0) = G_GEP %0, %1(s8)
+ RET 0
+...
+---
+name: test_gep_i16
+# CHECK-LABEL: name: test_gep_i16
+legalized: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: %0(p0) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s16) = G_CONSTANT i16 20
+# CHECK-NEXT: %3(s32) = G_SEXT %1(s16)
+# CHECK-NEXT: %2(p0) = G_GEP %0, %3(s32)
+# CHECK-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ %0(p0) = IMPLICIT_DEF
+ %1(s16) = G_CONSTANT i16 20
+ %2(p0) = G_GEP %0, %1(s16)
+ RET 0
+...
+---
+name: test_gep_i32
+# CHECK-LABEL: name: test_gep_i32
+legalized: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: %0(p0) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s32) = G_CONSTANT i32 20
+# CHECK-NEXT: %2(p0) = G_GEP %0, %1(s32)
+# CHECK-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ %0(p0) = IMPLICIT_DEF
+ %1(s32) = G_CONSTANT i32 20
+ %2(p0) = G_GEP %0, %1(s32)
+ RET 0
+...
+---
+name: test_gep_i64
+# CHECK-LABEL: name: test_gep_i64
+legalized: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: %0(p0) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s64) = G_CONSTANT i64 20
+# CHECK-NEXT: %2(p0) = G_GEP %0, %1(s64)
+# CHECK-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ %0(p0) = IMPLICIT_DEF
+ %1(s64) = G_CONSTANT i64 20
+ %2(p0) = G_GEP %0, %1(s64)
+ RET 0
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
new file mode 100644
index 000000000000..0d66a6384107
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
@@ -0,0 +1,115 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i16 @test_mul_i16(i16 %arg1, i16 %arg2) {
+ %ret = mul i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_mul_i32(i32 %arg1, i32 %arg2) {
+ %ret = mul i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_mul_i64(i64 %arg1, i64 %arg2) {
+ %ret = mul i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_mul_i16
+# CHECK-LABEL: name: test_mul_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: _ }
+# CHECK-NEXT: - { id: 1, class: _ }
+# CHECK-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: body: |
+# CHECK-NEXT: bb.0 (%ir-block.0):
+# CHECK-NEXT: %0(s16) = COPY %edi
+# CHECK-NEXT: %1(s16) = COPY %esi
+# CHECK-NEXT: %2(s16) = G_MUL %0, %1
+# CHECK-NEXT: %ax = COPY %2(s16)
+# CHECK-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_MUL %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_mul_i32
+# CHECK-LABEL: name: test_mul_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: _ }
+# CHECK-NEXT: - { id: 1, class: _ }
+# CHECK-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: body: |
+# CHECK-NEXT: bb.0 (%ir-block.0):
+# CHECK-NEXT: %0(s32) = COPY %edi
+# CHECK-NEXT: %1(s32) = COPY %esi
+# CHECK-NEXT: %2(s32) = G_MUL %0, %1
+# CHECK-NEXT: %eax = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_MUL %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_mul_i64
+# CHECK-LABEL: name: test_mul_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: _ }
+# CHECK-NEXT: - { id: 1, class: _ }
+# CHECK-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# CHECK: body: |
+# CHECK-NEXT: bb.0 (%ir-block.0):
+# CHECK-NEXT: %0(s64) = COPY %rdi
+# CHECK-NEXT: %1(s64) = COPY %rsi
+# CHECK-NEXT: %2(s64) = G_MUL %0, %1
+# CHECK-NEXT: %rax = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_MUL %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
new file mode 100644
index 000000000000..be62832b008a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
@@ -0,0 +1,111 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL
+--- |
+ define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 {
+ %ret = mul <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+ }
+
+ define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #0 {
+ %ret = mul <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #1 {
+ %ret = mul <2 x i64> %arg1, %arg2
+ ret <2 x i64> %ret
+ }
+
+ attributes #0 = { "target-features"="+sse4.1" }
+ attributes #1 = { "target-features"="+sse4.1,+avx512vl,+avx512f,+avx512dq" }
+
+...
+---
+name: test_mul_v8i16
+# ALL-LABEL: name: test_mul_v8i16
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<8 x s16>) = COPY %xmm0
+# ALL-NEXT: %1(<8 x s16>) = COPY %xmm1
+# ALL-NEXT: %2(<8 x s16>) = G_MUL %0, %1
+# ALL-NEXT: %xmm0 = COPY %2(<8 x s16>)
+# ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = COPY %xmm0
+ %1(<8 x s16>) = COPY %xmm1
+ %2(<8 x s16>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v4i32
+# ALL-LABEL: name: test_mul_v4i32
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<4 x s32>) = COPY %xmm0
+# ALL-NEXT: %1(<4 x s32>) = COPY %xmm1
+# ALL-NEXT: %2(<4 x s32>) = G_MUL %0, %1
+# ALL-NEXT: %xmm0 = COPY %2(<4 x s32>)
+# ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v2i64
+# ALL-LABEL: name: test_mul_v2i64
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<2 x s64>) = COPY %xmm0
+# ALL-NEXT: %1(<2 x s64>) = COPY %xmm1
+# ALL-NEXT: %2(<2 x s64>) = G_MUL %0, %1
+# ALL-NEXT: %xmm0 = COPY %2(<2 x s64>)
+# ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<2 x s64>) = COPY %xmm0
+ %1(<2 x s64>) = COPY %xmm1
+ %2(<2 x s64>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
new file mode 100644
index 000000000000..d99303c3ba3b
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
@@ -0,0 +1,111 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL
+--- |
+ define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) #0 {
+ %ret = mul <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+ }
+
+ define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #0 {
+ %ret = mul <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+ }
+
+ define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) #1 {
+ %ret = mul <4 x i64> %arg1, %arg2
+ ret <4 x i64> %ret
+ }
+
+ attributes #0 = { "target-features"="+avx2" }
+ attributes #1 = { "target-features"="+avx2,+avx512vl,+avx512f,+avx512dq" }
+
+...
+---
+name: test_mul_v16i16
+# ALL-LABEL: name: test_mul_v16i16
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<16 x s16>) = COPY %ymm0
+# ALL-NEXT: %1(<16 x s16>) = COPY %ymm1
+# ALL-NEXT: %2(<16 x s16>) = G_MUL %0, %1
+# ALL-NEXT: %ymm0 = COPY %2(<16 x s16>)
+# ALL-NEXT: RET 0, implicit %ymm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<16 x s16>) = COPY %ymm0
+ %1(<16 x s16>) = COPY %ymm1
+ %2(<16 x s16>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_mul_v8i32
+# ALL-LABEL: name: test_mul_v8i32
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<8 x s32>) = COPY %ymm0
+# ALL-NEXT: %1(<8 x s32>) = COPY %ymm1
+# ALL-NEXT: %2(<8 x s32>) = G_MUL %0, %1
+# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>)
+# ALL-NEXT: RET 0, implicit %ymm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<8 x s32>) = COPY %ymm0
+ %1(<8 x s32>) = COPY %ymm1
+ %2(<8 x s32>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_mul_v4i64
+# ALL-LABEL: name: test_mul_v4i64
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<4 x s64>) = COPY %ymm0
+# ALL-NEXT: %1(<4 x s64>) = COPY %ymm1
+# ALL-NEXT: %2(<4 x s64>) = G_MUL %0, %1
+# ALL-NEXT: %ymm0 = COPY %2(<4 x s64>)
+# ALL-NEXT: RET 0, implicit %ymm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<4 x s64>) = COPY %ymm0
+ %1(<4 x s64>) = COPY %ymm1
+ %2(<4 x s64>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit %ymm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir b/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
new file mode 100644
index 000000000000..24eefd30c2ac
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
@@ -0,0 +1,113 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 {
+ %ret = mul <32 x i16> %arg1, %arg2
+ ret <32 x i16> %ret
+ }
+
+ define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 {
+ %ret = mul <16 x i32> %arg1, %arg2
+ ret <16 x i32> %ret
+ }
+
+ define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #2 {
+ %ret = mul <8 x i64> %arg1, %arg2
+ ret <8 x i64> %ret
+ }
+
+ attributes #0 = { "target-features"="+avx512f,+avx512bw" }
+ attributes #1 = { "target-features"="+avx512f" }
+ attributes #2 = { "target-features"="+avx512f,+avx512dq" }
+
+...
+---
+name: test_mul_v32i16
+# ALL-LABEL: name: test_mul_v32i16
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<32 x s16>) = COPY %zmm0
+# ALL-NEXT: %1(<32 x s16>) = COPY %zmm1
+# ALL-NEXT: %2(<32 x s16>) = G_MUL %0, %1
+# ALL-NEXT: %zmm0 = COPY %2(<32 x s16>)
+# ALL-NEXT: RET 0, implicit %zmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<32 x s16>) = COPY %zmm0
+ %1(<32 x s16>) = COPY %zmm1
+ %2(<32 x s16>) = G_MUL %0, %1
+ %zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_mul_v16i32
+# ALL-LABEL: name: test_mul_v16i32
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<16 x s32>) = COPY %zmm0
+# ALL-NEXT: %1(<16 x s32>) = COPY %zmm1
+# ALL-NEXT: %2(<16 x s32>) = G_MUL %0, %1
+# ALL-NEXT: %zmm0 = COPY %2(<16 x s32>)
+# ALL-NEXT: RET 0, implicit %zmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<16 x s32>) = COPY %zmm0
+ %1(<16 x s32>) = COPY %zmm1
+ %2(<16 x s32>) = G_MUL %0, %1
+ %zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_mul_v8i64
+# ALL-LABEL: name: test_mul_v8i64
+alignment: 4
+legalized: false
+regBankSelected: false
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: _ }
+# ALL-NEXT: - { id: 1, class: _ }
+# ALL-NEXT: - { id: 2, class: _ }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<8 x s64>) = COPY %zmm0
+# ALL-NEXT: %1(<8 x s64>) = COPY %zmm1
+# ALL-NEXT: %2(<8 x s64>) = G_MUL %0, %1
+# ALL-NEXT: %zmm0 = COPY %2(<8 x s64>)
+# ALL-NEXT: RET 0, implicit %zmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<8 x s64>) = COPY %zmm0
+ %1(<8 x s64>) = COPY %zmm1
+ %2(<8 x s64>) = G_MUL %0, %1
+ %zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit %zmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/mul-scalar.ll b/test/CodeGen/X86/GlobalISel/mul-scalar.ll
new file mode 100644
index 000000000000..529e81c43304
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/mul-scalar.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+;TODO: instruction selection not supported yet
+;define i8 @test_mul_i8(i8 %arg1, i8 %arg2) {
+; %ret = mul i8 %arg1, %arg2
+; ret i8 %ret
+;}
+
+define i16 @test_mul_i16(i16 %arg1, i16 %arg2) {
+; X64-LABEL: test_mul_i16:
+; X64: # BB#0:
+; X64-NEXT: imulw %di, %si
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: retq
+ %ret = mul i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i32 @test_mul_i32(i32 %arg1, i32 %arg2) {
+; X64-LABEL: test_mul_i32:
+; X64: # BB#0:
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: retq
+ %ret = mul i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i64 @test_mul_i64(i64 %arg1, i64 %arg2) {
+; X64-LABEL: test_mul_i64:
+; X64: # BB#0:
+; X64-NEXT: imulq %rdi, %rsi
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: retq
+ %ret = mul i64 %arg1, %arg2
+ ret i64 %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/mul-vec.ll b/test/CodeGen/X86/GlobalISel/mul-vec.ll
new file mode 100644
index 000000000000..83615a718528
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/mul-vec.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=SKX
+
+define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
+; SKX-LABEL: test_mul_v8i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = mul <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+; SKX-LABEL: test_mul_v4i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = mul <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
+; SKX-LABEL: test_mul_v2i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpmullq %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = mul <2 x i64> %arg1, %arg2
+ ret <2 x i64> %ret
+}
+
+define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
+; SKX-LABEL: test_mul_v16i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = mul <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+}
+
+define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
+; SKX-LABEL: test_mul_v8i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = mul <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+}
+
+define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
+; SKX-LABEL: test_mul_v4i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpmullq %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = mul <4 x i64> %arg1, %arg2
+ ret <4 x i64> %ret
+}
+
+define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
+; SKX-LABEL: test_mul_v32i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpmullw %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = mul <32 x i16> %arg1, %arg2
+ ret <32 x i16> %ret
+}
+
+define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
+; SKX-LABEL: test_mul_v16i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpmulld %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = mul <16 x i32> %arg1, %arg2
+ ret <16 x i32> %ret
+}
+
+define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
+; SKX-LABEL: test_mul_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpmullq %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = mul <8 x i64> %arg1, %arg2
+ ret <8 x i64> %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
new file mode 100644
index 000000000000..446db56b992c
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
@@ -0,0 +1,31 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
+
+--- |
+ define void @test_mul_vec256() {
+ ret void
+ }
+...
+---
+name: test_mul_vec256
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_mul_vec256
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>) = G_MUL %0, %0
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
new file mode 100644
index 000000000000..f824ee12dcfb
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
@@ -0,0 +1,33 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
+
+--- |
+
+ define void @test_mul_vec512() {
+ ret void
+ }
+
+...
+---
+name: test_mul_vec512
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_mul_vec512
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<16 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = G_MUL %0, %0
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index 8e04239041a8..3a65a9003773 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -27,6 +27,10 @@
ret i64 %ret
}
+ define void @test_mul_gpr() {
+ ret void
+ }
+
define float @test_add_float(float %arg1, float %arg2) {
%ret = fadd float %arg1, %arg2
ret float %ret
@@ -110,6 +114,12 @@
ret void
}
+ define void @test_gep() {
+ %p1 = getelementptr i32, i32* undef, i32 5
+ %p2 = getelementptr i32, i32* undef, i64 5
+ ret void
+ }
+
...
---
name: test_add_i8
@@ -220,6 +230,45 @@ body: |
...
---
+name: test_mul_gpr
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_mul_gpr
+# CHECK: registers:
+# CHECK: - { id: 0, class: gpr }
+# CHECK: - { id: 1, class: gpr }
+# CHECK: - { id: 2, class: gpr }
+# CHECK: - { id: 3, class: gpr }
+# CHECK: - { id: 4, class: gpr }
+# CHECK: - { id: 5, class: gpr }
+# CHECK: - { id: 6, class: gpr }
+# CHECK: - { id: 7, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+ - { id: 7, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(s64) = IMPLICIT_DEF
+ %1(s32) = IMPLICIT_DEF
+ %2(s16) = IMPLICIT_DEF
+ %3(s8) = IMPLICIT_DEF
+ %4(s64) = G_MUL %0, %0
+ %5(s32) = G_MUL %1, %1
+ %6(s16) = G_MUL %2, %2
+ %7(s8) = G_MUL %3, %3
+ RET 0
+...
+---
name: test_add_float
alignment: 4
legalized: true
@@ -660,3 +709,29 @@ body: |
RET 0
...
+---
+name: test_gep
+legalized: true
+# CHECK-LABEL: name: test_gep
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr }
+# CHECK-NEXT: - { id: 1, class: gpr }
+# CHECK-NEXT: - { id: 2, class: gpr }
+# CHECK-NEXT: - { id: 3, class: gpr }
+# CHECK-NEXT: - { id: 4, class: gpr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+body: |
+ bb.0 (%ir-block.0):
+ %0(p0) = IMPLICIT_DEF
+ %1(s32) = G_CONSTANT i32 20
+ %2(p0) = G_GEP %0, %1(s32)
+ %3(s64) = G_CONSTANT i64 20
+ %4(p0) = G_GEP %0, %3(s64)
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-gep.mir b/test/CodeGen/X86/GlobalISel/select-gep.mir
new file mode 100644
index 000000000000..2c89b7057c3d
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-gep.mir
@@ -0,0 +1,37 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK
+
+--- |
+ define i32* @test_gep_i32(i32* %arr) {
+ %arrayidx = getelementptr i32, i32* %arr, i32 5
+ ret i32* %arrayidx
+ }
+...
+---
+name: test_gep_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK-LABEL: name: test_gep_i32
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64 }
+# CHECK-NEXT: - { id: 1, class: gr64_nosp }
+# CHECK-NEXT: - { id: 2, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# CHECK: body:
+# CHECK: %1 = MOV64ri32 20
+# CHECK-NEXT: %2 = LEA64r %0, 1, %1, 0, _
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi
+
+ %0(p0) = COPY %rdi
+ %1(s64) = G_CONSTANT i64 20
+ %2(p0) = G_GEP %0, %1(s64)
+ %rax = COPY %2(p0)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir b/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
new file mode 100644
index 000000000000..34a77acc2d1e
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
@@ -0,0 +1,112 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define i16 @test_mul_i16(i16 %arg1, i16 %arg2) {
+ %ret = mul i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_mul_i32(i32 %arg1, i32 %arg2) {
+ %ret = mul i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_mul_i64(i64 %arg1, i64 %arg2) {
+ %ret = mul i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_mul_i16
+# ALL-LABEL: name: test_mul_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16 }
+# ALL-NEXT: - { id: 1, class: gr16 }
+# ALL-NEXT: - { id: 2, class: gr16 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: body: |
+# ALL: %0 = COPY %di
+# ALL-NEXT: %1 = COPY %si
+# ALL-NEXT: %2 = IMUL16rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %2
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_MUL %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_mul_i32
+# ALL-LABEL: name: test_mul_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32 }
+# ALL-NEXT: - { id: 1, class: gr32 }
+# ALL-NEXT: - { id: 2, class: gr32 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: body: |
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = IMUL32rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %2
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_MUL %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_mul_i64
+# ALL-LABEL: name: test_mul_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64 }
+# ALL-NEXT: - { id: 1, class: gr64 }
+# ALL-NEXT: - { id: 2, class: gr64 }
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: gpr }
+# ALL: body: |
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = IMUL64rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_MUL %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-mul-vec.mir b/test/CodeGen/X86/GlobalISel/select-mul-vec.mir
new file mode 100644
index 000000000000..5f8ab1e4f189
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-mul-vec.mir
@@ -0,0 +1,480 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+--- |
+ define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 {
+ %ret = mul <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+ }
+
+ define <8 x i16> @test_mul_v8i16_avx(<8 x i16> %arg1, <8 x i16> %arg2) #1 {
+ %ret = mul <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+ }
+
+ define <8 x i16> @test_mul_v8i16_avx512bwvl(<8 x i16> %arg1, <8 x i16> %arg2) #2 {
+ %ret = mul <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+ }
+
+ define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #3 {
+ %ret = mul <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <4 x i32> @test_mul_v4i32_avx(<4 x i32> %arg1, <4 x i32> %arg2) #1 {
+ %ret = mul <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <4 x i32> @test_mul_v4i32_avx512vl(<4 x i32> %arg1, <4 x i32> %arg2) #4 {
+ %ret = mul <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #5 {
+ %ret = mul <2 x i64> %arg1, %arg2
+ ret <2 x i64> %ret
+ }
+
+ define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) #6 {
+ %ret = mul <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+ }
+
+ define <16 x i16> @test_mul_v16i16_avx512bwvl(<16 x i16> %arg1, <16 x i16> %arg2) #2 {
+ %ret = mul <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+ }
+
+ define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #6 {
+ %ret = mul <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+ }
+
+ define <8 x i32> @test_mul_v8i32_avx512vl(<8 x i32> %arg1, <8 x i32> %arg2) #4 {
+ %ret = mul <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+ }
+
+ define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) #5 {
+ %ret = mul <4 x i64> %arg1, %arg2
+ ret <4 x i64> %ret
+ }
+
+ define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #7 {
+ %ret = mul <32 x i16> %arg1, %arg2
+ ret <32 x i16> %ret
+ }
+
+ define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #8 {
+ %ret = mul <16 x i32> %arg1, %arg2
+ ret <16 x i32> %ret
+ }
+
+ define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #9 {
+ %ret = mul <8 x i64> %arg1, %arg2
+ ret <8 x i64> %ret
+ }
+
+ attributes #0 = { "target-features"="+sse2" }
+ attributes #1 = { "target-features"="+avx" }
+ attributes #2 = { "target-features"="+avx512vl,+avx512f,+avx512bw" }
+ attributes #3 = { "target-features"="+sse4.1" }
+ attributes #4 = { "target-features"="+avx512vl,+avx512f" }
+ attributes #5 = { "target-features"="+avx2,+avx512vl,+avx512f,+avx512dq" }
+ attributes #6 = { "target-features"="+avx2" }
+ attributes #7 = { "target-features"="+avx512f,+avx512bw" }
+ attributes #8 = { "target-features"="+avx512f" }
+ attributes #9 = { "target-features"="+avx512f,+avx512dq" }
+
+...
+---
+name: test_mul_v8i16
+# CHECK-LABEL: name: test_mul_v8i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr128 }
+# CHECK-NEXT: - { id: 1, class: vr128 }
+# CHECK-NEXT: - { id: 2, class: vr128 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = PMULLWrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = COPY %xmm0
+ %1(<8 x s16>) = COPY %xmm1
+ %2(<8 x s16>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v8i16_avx
+# CHECK-LABEL: name: test_mul_v8i16_avx
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr128 }
+# CHECK-NEXT: - { id: 1, class: vr128 }
+# CHECK-NEXT: - { id: 2, class: vr128 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLWrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = COPY %xmm0
+ %1(<8 x s16>) = COPY %xmm1
+ %2(<8 x s16>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v8i16_avx512bwvl
+# CHECK-LABEL: name: test_mul_v8i16_avx512bwvl
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr128x }
+# CHECK-NEXT: - { id: 1, class: vr128x }
+# CHECK-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLWZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = COPY %xmm0
+ %1(<8 x s16>) = COPY %xmm1
+ %2(<8 x s16>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v4i32
+# CHECK-LABEL: name: test_mul_v4i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr128 }
+# CHECK-NEXT: - { id: 1, class: vr128 }
+# CHECK-NEXT: - { id: 2, class: vr128 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = PMULLDrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v4i32_avx
+# CHECK-LABEL: name: test_mul_v4i32_avx
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr128 }
+# CHECK-NEXT: - { id: 1, class: vr128 }
+# CHECK-NEXT: - { id: 2, class: vr128 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLDrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v4i32_avx512vl
+# CHECK-LABEL: name: test_mul_v4i32_avx512vl
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr128x }
+# CHECK-NEXT: - { id: 1, class: vr128x }
+# CHECK-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLDZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v2i64
+# CHECK-LABEL: name: test_mul_v2i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr128x }
+# CHECK-NEXT: - { id: 1, class: vr128x }
+# CHECK-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLQZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<2 x s64>) = COPY %xmm0
+ %1(<2 x s64>) = COPY %xmm1
+ %2(<2 x s64>) = G_MUL %0, %1
+ %xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_mul_v16i16
+# CHECK-LABEL: name: test_mul_v16i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr256 }
+# CHECK-NEXT: - { id: 1, class: vr256 }
+# CHECK-NEXT: - { id: 2, class: vr256 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLWYrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<16 x s16>) = COPY %ymm0
+ %1(<16 x s16>) = COPY %ymm1
+ %2(<16 x s16>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_mul_v16i16_avx512bwvl
+# CHECK-LABEL: name: test_mul_v16i16_avx512bwvl
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr256x }
+# CHECK-NEXT: - { id: 1, class: vr256x }
+# CHECK-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLWZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<16 x s16>) = COPY %ymm0
+ %1(<16 x s16>) = COPY %ymm1
+ %2(<16 x s16>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_mul_v8i32
+# CHECK-LABEL: name: test_mul_v8i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr256 }
+# CHECK-NEXT: - { id: 1, class: vr256 }
+# CHECK-NEXT: - { id: 2, class: vr256 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLDYrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<8 x s32>) = COPY %ymm0
+ %1(<8 x s32>) = COPY %ymm1
+ %2(<8 x s32>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_mul_v8i32_avx512vl
+# CHECK-LABEL: name: test_mul_v8i32_avx512vl
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr256x }
+# CHECK-NEXT: - { id: 1, class: vr256x }
+# CHECK-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLDZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<8 x s32>) = COPY %ymm0
+ %1(<8 x s32>) = COPY %ymm1
+ %2(<8 x s32>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_mul_v4i64
+# CHECK-LABEL: name: test_mul_v4i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr256x }
+# CHECK-NEXT: - { id: 1, class: vr256x }
+# CHECK-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLQZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<4 x s64>) = COPY %ymm0
+ %1(<4 x s64>) = COPY %ymm1
+ %2(<4 x s64>) = G_MUL %0, %1
+ %ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_mul_v32i16
+# CHECK-LABEL: name: test_mul_v32i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr512 }
+# CHECK-NEXT: - { id: 1, class: vr512 }
+# CHECK-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLWZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<32 x s16>) = COPY %zmm0
+ %1(<32 x s16>) = COPY %zmm1
+ %2(<32 x s16>) = G_MUL %0, %1
+ %zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_mul_v16i32
+# CHECK-LABEL: name: test_mul_v16i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr512 }
+# CHECK-NEXT: - { id: 1, class: vr512 }
+# CHECK-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLDZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<16 x s32>) = COPY %zmm0
+ %1(<16 x s32>) = COPY %zmm1
+ %2(<16 x s32>) = G_MUL %0, %1
+ %zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_mul_v8i64
+# CHECK-LABEL: name: test_mul_v8i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: vr512 }
+# CHECK-NEXT: - { id: 1, class: vr512 }
+# CHECK-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# CHECK: %2 = VPMULLQZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<8 x s64>) = COPY %zmm0
+ %1(<8 x s64>) = COPY %zmm1
+ %2(<8 x s64>) = G_MUL %0, %1
+ %zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit %zmm0
+
+...
diff --git a/test/CodeGen/X86/addcarry.ll b/test/CodeGen/X86/addcarry.ll
index 5e95cd832789..be550e3fe2d1 100644
--- a/test/CodeGen/X86/addcarry.ll
+++ b/test/CodeGen/X86/addcarry.ll
@@ -204,3 +204,70 @@ entry:
%6 = add i64 %4, %5
ret i64 %6
}
+
+%S = type { [4 x i64] }
+
+define %S @readd(%S* nocapture readonly %this, %S %arg.b) {
+; CHECK-LABEL: readd:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: addq (%rsi), %rdx
+; CHECK-NEXT: movq 8(%rsi), %r10
+; CHECK-NEXT: adcq $0, %r10
+; CHECK-NEXT: sbbq %rax, %rax
+; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: addq %rcx, %r10
+; CHECK-NEXT: adcq 16(%rsi), %rax
+; CHECK-NEXT: sbbq %rcx, %rcx
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: addq %r8, %rax
+; CHECK-NEXT: adcq 24(%rsi), %rcx
+; CHECK-NEXT: addq %r9, %rcx
+; CHECK-NEXT: movq %rdx, (%rdi)
+; CHECK-NEXT: movq %r10, 8(%rdi)
+; CHECK-NEXT: movq %rax, 16(%rdi)
+; CHECK-NEXT: movq %rcx, 24(%rdi)
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = extractvalue %S %arg.b, 0
+ %.elt6 = extractvalue [4 x i64] %0, 1
+ %.elt8 = extractvalue [4 x i64] %0, 2
+ %.elt10 = extractvalue [4 x i64] %0, 3
+ %.elt = extractvalue [4 x i64] %0, 0
+ %1 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 0
+ %2 = load i64, i64* %1, align 8
+ %3 = zext i64 %2 to i128
+ %4 = zext i64 %.elt to i128
+ %5 = add nuw nsw i128 %3, %4
+ %6 = trunc i128 %5 to i64
+ %7 = lshr i128 %5, 64
+ %8 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 1
+ %9 = load i64, i64* %8, align 8
+ %10 = zext i64 %9 to i128
+ %11 = add nuw nsw i128 %7, %10
+ %12 = zext i64 %.elt6 to i128
+ %13 = add nuw nsw i128 %11, %12
+ %14 = trunc i128 %13 to i64
+ %15 = lshr i128 %13, 64
+ %16 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 2
+ %17 = load i64, i64* %16, align 8
+ %18 = zext i64 %17 to i128
+ %19 = add nuw nsw i128 %15, %18
+ %20 = zext i64 %.elt8 to i128
+ %21 = add nuw nsw i128 %19, %20
+ %22 = lshr i128 %21, 64
+ %23 = trunc i128 %21 to i64
+ %24 = getelementptr inbounds %S, %S* %this, i64 0,i32 0, i64 3
+ %25 = load i64, i64* %24, align 8
+ %26 = zext i64 %25 to i128
+ %27 = add nuw nsw i128 %22, %26
+ %28 = zext i64 %.elt10 to i128
+ %29 = add nuw nsw i128 %27, %28
+ %30 = trunc i128 %29 to i64
+ %31 = insertvalue [4 x i64] undef, i64 %6, 0
+ %32 = insertvalue [4 x i64] %31, i64 %14, 1
+ %33 = insertvalue [4 x i64] %32, i64 %23, 2
+ %34 = insertvalue [4 x i64] %33, i64 %30, 3
+ %35 = insertvalue %S undef, [4 x i64] %34, 0
+ ret %S %35
+}
diff --git a/test/CodeGen/X86/avx-isa-check.ll b/test/CodeGen/X86/avx-isa-check.ll
index dffc8078e44f..5d66dfde0bc6 100644
--- a/test/CodeGen/X86/avx-isa-check.ll
+++ b/test/CodeGen/X86/avx-isa-check.ll
@@ -680,3 +680,8 @@ define <4 x double> @_inreg4xdouble(double %a) {
%c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
ret <4 x double> %c
}
+
+define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #0 {
+ %ret = mul <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+}
diff --git a/test/CodeGen/X86/avx1-logical-load-folding.ll b/test/CodeGen/X86/avx1-logical-load-folding.ll
index 90e00c965391..7073eb224763 100644
--- a/test/CodeGen/X86/avx1-logical-load-folding.ll
+++ b/test/CodeGen/X86/avx1-logical-load-folding.ll
@@ -1,10 +1,26 @@
-; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
-
-target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.9.0"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -O3 -disable-peephole -mtriple=i686-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -O3 -disable-peephole -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X64
; Function Attrs: nounwind ssp uwtable
define void @test1(float* %A, float* %C) #0 {
+; X86-LABEL: test1:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vandps LCPI0_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -13,12 +29,27 @@ define void @test1(float* %A, float* %C) #0 {
%tmp6 = extractelement <8 x float> %tmp5, i32 0
store float %tmp6, float* %C
ret void
-
- ; CHECK: vandps LCPI0_0(%rip), %ymm0, %ymm0
}
; Function Attrs: nounwind ssp uwtable
define void @test2(float* %A, float* %C) #0 {
+; X86-LABEL: test2:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vorps LCPI1_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -27,12 +58,27 @@ define void @test2(float* %A, float* %C) #0 {
%tmp6 = extractelement <8 x float> %tmp5, i32 0
store float %tmp6, float* %C
ret void
-
- ; CHECK: vorps LCPI1_0(%rip), %ymm0, %ymm0
}
; Function Attrs: nounwind ssp uwtable
define void @test3(float* %A, float* %C) #0 {
+; X86-LABEL: test3:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vxorps LCPI2_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test3:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -41,11 +87,26 @@ define void @test3(float* %A, float* %C) #0 {
%tmp6 = extractelement <8 x float> %tmp5, i32 0
store float %tmp6, float* %C
ret void
-
- ; CHECK: vxorps LCPI2_0(%rip), %ymm0, %ymm0
}
define void @test4(float* %A, float* %C) #0 {
+; X86-LABEL: test4:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vandnps LCPI3_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test4:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -55,6 +116,4 @@ define void @test4(float* %A, float* %C) #0 {
%tmp7 = extractelement <8 x float> %tmp6, i32 0
store float %tmp7, float * %C
ret void
-
- ;CHECK: vandnps LCPI3_0(%rip), %ymm0, %ymm0
}
diff --git a/test/CodeGen/X86/avx2-schedule.ll b/test/CodeGen/X86/avx2-schedule.ll
new file mode 100644
index 000000000000..042bc217b97c
--- /dev/null
+++ b/test/CodeGen/X86/avx2-schedule.ll
@@ -0,0 +1,338 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
+
+define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) {
+; HASWELL-LABEL: test_pabsb:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [5:0.50]
+; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pabsb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [6:1.00]
+; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
+ %2 = load <32 x i8>, <32 x i8> *%a1, align 32
+ %3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2)
+ %4 = or <32 x i8> %1, %3
+ ret <32 x i8> %4
+}
+declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
+
+define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) {
+; HASWELL-LABEL: test_pabsd:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [5:0.50]
+; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pabsd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [6:1.00]
+; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
+ %2 = load <8 x i32>, <8 x i32> *%a1, align 32
+ %3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2)
+ %4 = or <8 x i32> %1, %3
+ ret <8 x i32> %4
+}
+declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
+
+define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) {
+; HASWELL-LABEL: test_pabsw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [5:0.50]
+; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pabsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [6:1.00]
+; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
+ %2 = load <16 x i16>, <16 x i16> *%a1, align 32
+ %3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2)
+ %4 = or <16 x i16> %1, %3
+ ret <16 x i16> %4
+}
+declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
+
+define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
+; HASWELL-LABEL: test_paddb:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_paddb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpaddb (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = add <32 x i8> %a0, %a1
+ %2 = load <32 x i8>, <32 x i8> *%a2, align 32
+ %3 = add <32 x i8> %1, %2
+ ret <32 x i8> %3
+}
+
+define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
+; HASWELL-LABEL: test_paddd:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_paddd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpaddd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = add <8 x i32> %a0, %a1
+ %2 = load <8 x i32>, <8 x i32> *%a2, align 32
+ %3 = add <8 x i32> %1, %2
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; HASWELL-LABEL: test_paddq:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_paddq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpaddq (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = add <4 x i64> %a0, %a1
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = add <4 x i64> %1, %2
+ ret <4 x i64> %3
+}
+
+define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
+; HASWELL-LABEL: test_paddw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_paddw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpaddw (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = add <16 x i16> %a0, %a1
+ %2 = load <16 x i16>, <16 x i16> *%a2, align 32
+ %3 = add <16 x i16> %1, %2
+ ret <16 x i16> %3
+}
+
+define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; HASWELL-LABEL: test_pand:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
+; HASWELL-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pand:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpand (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = and <4 x i64> %a0, %a1
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = and <4 x i64> %1, %2
+ %4 = add <4 x i64> %3, %a1
+ ret <4 x i64> %4
+}
+
+define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; HASWELL-LABEL: test_pandn:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
+; HASWELL-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [5:0.50]
+; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pandn:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpandn (%rdi), %ymm0, %ymm1 # sched: [6:1.00]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %2 = and <4 x i64> %a1, %1
+ %3 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
+ %5 = and <4 x i64> %3, %4
+ %6 = add <4 x i64> %2, %5
+ ret <4 x i64> %6
+}
+
+define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
+; HASWELL-LABEL: test_pmulld:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00]
+; HASWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pmulld:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
+; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = mul <8 x i32> %a0, %a1
+ %2 = load <8 x i32>, <8 x i32> *%a2, align 32
+ %3 = mul <8 x i32> %1, %2
+ ret <8 x i32> %3
+}
+
+define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
+; HASWELL-LABEL: test_pmullw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
+; HASWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pmullw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
+; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = mul <16 x i16> %a0, %a1
+ %2 = load <16 x i16>, <16 x i16> *%a2, align 32
+ %3 = mul <16 x i16> %1, %2
+ ret <16 x i16> %3
+}
+
+define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; HASWELL-LABEL: test_por:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
+; HASWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_por:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = or <4 x i64> %a0, %a1
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = or <4 x i64> %1, %2
+ %4 = add <4 x i64> %3, %a1
+ ret <4 x i64> %4
+}
+
+define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
+; HASWELL-LABEL: test_psubb:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_psubb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = sub <32 x i8> %a0, %a1
+ %2 = load <32 x i8>, <32 x i8> *%a2, align 32
+ %3 = sub <32 x i8> %1, %2
+ ret <32 x i8> %3
+}
+
+define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
+; HASWELL-LABEL: test_psubd:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_psubd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = sub <8 x i32> %a0, %a1
+ %2 = load <8 x i32>, <8 x i32> *%a2, align 32
+ %3 = sub <8 x i32> %1, %2
+ ret <8 x i32> %3
+}
+
+define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; HASWELL-LABEL: test_psubq:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_psubq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = sub <4 x i64> %a0, %a1
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = sub <4 x i64> %1, %2
+ ret <4 x i64> %3
+}
+
+define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
+; HASWELL-LABEL: test_psubw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_psubw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = sub <16 x i16> %a0, %a1
+ %2 = load <16 x i16>, <16 x i16> *%a2, align 32
+ %3 = sub <16 x i16> %1, %2
+ ret <16 x i16> %3
+}
+
+define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; HASWELL-LABEL: test_pxor:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
+; HASWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
+; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [1:1.00]
+;
+; ZNVER1-LABEL: test_pxor:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
+; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [4:1.00]
+ %1 = xor <4 x i64> %a0, %a1
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = xor <4 x i64> %1, %2
+ %4 = add <4 x i64> %3, %a1
+ ret <4 x i64> %4
+}
+
+!0 = !{i32 1}
diff --git a/test/CodeGen/X86/avx512vl-arith.ll b/test/CodeGen/X86/avx512vl-arith.ll
index ef01d8656dac..9c056cdee196 100644
--- a/test/CodeGen/X86/avx512vl-arith.ll
+++ b/test/CodeGen/X86/avx512vl-arith.ll
@@ -1,36 +1,42 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl| FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding| FileCheck %s
; 256-bit
-; CHECK-LABEL: vpaddq256_test
-; CHECK: vpaddq %ymm{{.*}}
-; CHECK: ret
define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpaddq256_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <4 x i64> %i, %j
ret <4 x i64> %x
}
-; CHECK-LABEL: vpaddq256_fold_test
-; CHECK: vpaddq (%rdi), %ymm{{.*}}
-; CHECK: ret
define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
+; CHECK-LABEL: vpaddq256_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <4 x i64>, <4 x i64>* %j, align 4
%x = add <4 x i64> %i, %tmp
ret <4 x i64> %x
}
-; CHECK-LABEL: vpaddq256_broadcast_test
-; CHECK: vpaddq LCP{{.*}}(%rip){1to4}, %ymm{{.*}}
-; CHECK: ret
define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind {
+; CHECK-LABEL: vpaddq256_broadcast_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI2_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <4 x i64> %i, <i64 1, i64 1, i64 1, i64 1>
ret <4 x i64> %x
}
-; CHECK-LABEL: vpaddq256_broadcast2_test
-; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}}
-; CHECK: ret
define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind {
+; CHECK-LABEL: vpaddq256_broadcast2_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%j = load i64, i64* %j.ptr
%j.0 = insertelement <4 x i64> undef, i64 %j, i32 0
%j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer
@@ -38,55 +44,68 @@ define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind
ret <4 x i64> %x
}
-; CHECK-LABEL: vpaddd256_test
-; CHECK: vpaddd %ymm{{.*}}
-; CHECK: ret
define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpaddd256_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <8 x i32> %i, %j
ret <8 x i32> %x
}
-; CHECK-LABEL: vpaddd256_fold_test
-; CHECK: vpaddd (%rdi), %ymm{{.*}}
-; CHECK: ret
define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
+; CHECK-LABEL: vpaddd256_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <8 x i32>, <8 x i32>* %j, align 4
%x = add <8 x i32> %i, %tmp
ret <8 x i32> %x
}
-; CHECK-LABEL: vpaddd256_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*}}
-; CHECK: ret
define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind {
+; CHECK-LABEL: vpaddd256_broadcast_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI6_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
ret <8 x i32> %x
}
-; CHECK-LABEL: vpaddd256_mask_test
-; CHECK: vpaddd %ymm{{.*%k[1-7].*}}
-; CHECK: ret
define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_mask_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = add <8 x i32> %i, %j
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
ret <8 x i32> %r
}
-; CHECK-LABEL: vpaddd256_maskz_test
-; CHECK: vpaddd %ymm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_maskz_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = add <8 x i32> %i, %j
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
ret <8 x i32> %r
}
-; CHECK-LABEL: vpaddd256_mask_fold_test
-; CHECK: vpaddd (%rdi), %ymm{{.*%k[1-7]}}
-; CHECK: ret
define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_mask_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%j = load <8 x i32>, <8 x i32>* %j.ptr
%x = add <8 x i32> %i, %j
@@ -94,20 +113,27 @@ define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x
ret <8 x i32> %r
}
-; CHECK-LABEL: vpaddd256_mask_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]}}}
-; CHECK: ret
define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_mask_broadcast_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI10_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
ret <8 x i32> %r
}
-; CHECK-LABEL: vpaddd256_maskz_fold_test
-; CHECK: vpaddd (%rdi), %ymm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_maskz_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%j = load <8 x i32>, <8 x i32>* %j.ptr
%x = add <8 x i32> %i, %j
@@ -115,96 +141,111 @@ define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8
ret <8 x i32> %r
}
-; CHECK-LABEL: vpaddd256_maskz_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd256_maskz_broadcast_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI12_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = add <8 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
ret <8 x i32> %r
}
-; CHECK-LABEL: vpsubq256_test
-; CHECK: vpsubq %ymm{{.*}}
-; CHECK: ret
define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpsubq256_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsubq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfb,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <4 x i64> %i, %j
ret <4 x i64> %x
}
-; CHECK-LABEL: vpsubd256_test
-; CHECK: vpsubd %ymm{{.*}}
-; CHECK: ret
define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpsubd256_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsubd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <8 x i32> %i, %j
ret <8 x i32> %x
}
-; CHECK-LABEL: vpmulld256_test
-; CHECK: vpmulld %ymm{{.*}}
-; CHECK: ret
define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) {
+; CHECK-LABEL: vpmulld256_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmulld %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x40,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = mul <8 x i32> %i, %j
ret <8 x i32> %x
}
-; CHECK-LABEL: test_vaddpd_256
-; CHECK: vaddpd{{.*}}
-; CHECK: ret
define <4 x double> @test_vaddpd_256(<4 x double> %y, <4 x double> %x) {
+; CHECK-LABEL: test_vaddpd_256:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
entry:
%add.i = fadd <4 x double> %x, %y
ret <4 x double> %add.i
}
-; CHECK-LABEL: test_fold_vaddpd_256
-; CHECK: vaddpd LCP{{.*}}(%rip){{.*}}
-; CHECK: ret
define <4 x double> @test_fold_vaddpd_256(<4 x double> %y) {
+; CHECK-LABEL: test_fold_vaddpd_256:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI17_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
entry:
%add.i = fadd <4 x double> %y, <double 4.500000e+00, double 3.400000e+00, double 4.500000e+00, double 5.600000e+00>
ret <4 x double> %add.i
}
-; CHECK-LABEL: test_broadcast_vaddpd_256
-; CHECK: LCP{{.*}}(%rip){1to8}, %ymm0, %ymm0
-; CHECK: ret
define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind {
+; CHECK-LABEL: test_broadcast_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x58,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI18_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%b = fadd <8 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
ret <8 x float> %b
}
-; CHECK-LABEL: test_mask_vaddps_256
-; CHECK: vaddps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i,
- <8 x float> %j, <8 x i32> %mask1)
- nounwind readnone {
+define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x58,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = fadd <8 x float> %i, %j
%r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
ret <8 x float> %r
}
-; CHECK-LABEL: test_mask_vmulps_256
-; CHECK: vmulps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i,
- <8 x float> %j, <8 x i32> %mask1)
- nounwind readnone {
+define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmulps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x59,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = fmul <8 x float> %i, %j
%r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
ret <8 x float> %r
}
-; CHECK-LABEL: test_mask_vminps_256
-; CHECK: vminps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i,
- <8 x float> %j, <8 x i32> %mask1)
- nounwind readnone {
+define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1)nounwind readnone {
+; CHECK-LABEL: test_mask_vminps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vminps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5d,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%cmp_res = fcmp olt <8 x float> %i, %j
%min = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
@@ -212,12 +253,13 @@ define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i,
ret <8 x float> %r
}
-; CHECK-LABEL: test_mask_vmaxps_256
-; CHECK: vmaxps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i,
- <8 x float> %j, <8 x i32> %mask1)
- nounwind readnone {
+define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmaxps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%cmp_res = fcmp ogt <8 x float> %i, %j
%max = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
@@ -225,48 +267,52 @@ define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i,
ret <8 x float> %r
}
-; CHECK-LABEL: test_mask_vsubps_256
-; CHECK: vsubps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i,
- <8 x float> %j, <8 x i32> %mask1)
- nounwind readnone {
+define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vsubps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5c,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = fsub <8 x float> %i, %j
%r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
ret <8 x float> %r
}
-; CHECK-LABEL: test_mask_vdivps_256
-; CHECK: vdivps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i,
- <8 x float> %j, <8 x i32> %mask1)
- nounwind readnone {
+define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivps_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vdivps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5e,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%x = fdiv <8 x float> %i, %j
%r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
ret <8 x float> %r
}
-; CHECK-LABEL: test_mask_vmulpd_256
-; CHECK: vmulpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i,
- <4 x double> %j, <4 x i64> %mask1)
- nounwind readnone {
+define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmulpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x59,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%x = fmul <4 x double> %i, %j
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
ret <4 x double> %r
}
-; CHECK-LABEL: test_mask_vminpd_256
-; CHECK: vminpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i,
- <4 x double> %j, <4 x i64> %mask1)
- nounwind readnone {
+define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vminpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vminpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5d,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%cmp_res = fcmp olt <4 x double> %i, %j
%min = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
@@ -274,12 +320,13 @@ define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i,
ret <4 x double> %r
}
-; CHECK-LABEL: test_mask_vmaxpd_256
-; CHECK: vmaxpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i,
- <4 x double> %j, <4 x i64> %mask1)
- nounwind readnone {
+define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmaxpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%cmp_res = fcmp ogt <4 x double> %i, %j
%max = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
@@ -287,59 +334,65 @@ define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i,
ret <4 x double> %r
}
-; CHECK-LABEL: test_mask_vsubpd_256
-; CHECK: vsubpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i,
- <4 x double> %j, <4 x i64> %mask1)
- nounwind readnone {
+define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vsubpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5c,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%x = fsub <4 x double> %i, %j
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
ret <4 x double> %r
}
-; CHECK-LABEL: test_mask_vdivpd_256
-; CHECK: vdivpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i,
- <4 x double> %j, <4 x i64> %mask1)
- nounwind readnone {
+define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vdivpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5e,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%x = fdiv <4 x double> %i, %j
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
ret <4 x double> %r
}
-; CHECK-LABEL: test_mask_vaddpd_256
-; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i,
- <4 x double> %j, <4 x i64> %mask1)
- nounwind readnone {
+define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
+; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%x = fadd <4 x double> %i, %j
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
ret <4 x double> %r
}
-; CHECK-LABEL: test_maskz_vaddpd_256
-; CHECK: vaddpd {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}}}
-; CHECK: ret
-define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j,
- <4 x i64> %mask1) nounwind readnone {
+define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_maskz_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%x = fadd <4 x double> %i, %j
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
ret <4 x double> %r
}
-; CHECK-LABEL: test_mask_fold_vaddpd_256
-; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}.*}}
-; CHECK: ret
-define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i,
- <4 x double>* %j, <4 x i64> %mask1)
- nounwind {
+define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_fold_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm3, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
+; CHECK-NEXT: vaddpd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%tmp = load <4 x double>, <4 x double>* %j
%x = fadd <4 x double> %i, %tmp
@@ -347,11 +400,13 @@ define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %
ret <4 x double> %r
}
-; CHECK-LABEL: test_maskz_fold_vaddpd_256
-; CHECK: vaddpd (%rdi), {{.*%ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]} {z}.*}}
-; CHECK: ret
-define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j,
- <4 x i64> %mask1) nounwind {
+define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_fold_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT: vaddpd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%tmp = load <4 x double>, <4 x double>* %j
%x = fadd <4 x double> %i, %tmp
@@ -359,43 +414,46 @@ define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %
ret <4 x double> %r
}
-; CHECK-LABEL: test_broadcast2_vaddpd_256
-; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*}}
-; CHECK: ret
define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind {
+; CHECK-LABEL: test_broadcast2_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
- %c = shufflevector <4 x double> %b, <4 x double> undef,
- <4 x i32> zeroinitializer
+ %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
%x = fadd <4 x double> %c, %i
ret <4 x double> %x
}
-; CHECK-LABEL: test_mask_broadcast_vaddpd_256
-; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]}.*}}
-; CHECK: ret
-define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i,
- double* %j, <4 x i64> %mask1) nounwind {
+define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_broadcast_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm0, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xef,0xc0]
+; CHECK-NEXT: vpcmpneqq %ymm0, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xc8,0x04]
+; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm1, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x39,0x58,0x0f]
+; CHECK-NEXT: vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
- %c = shufflevector <4 x double> %b, <4 x double> undef,
- <4 x i32> zeroinitializer
+ %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
%x = fadd <4 x double> %c, %i
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %i
ret <4 x double> %r
}
-; CHECK-LABEL: test_maskz_broadcast_vaddpd_256
-; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
-define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j,
- <4 x i64> %mask1) nounwind {
+define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_broadcast_vaddpd_256:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
+; CHECK-NEXT: vaddpd (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
- %c = shufflevector <4 x double> %b, <4 x double> undef,
- <4 x i32> zeroinitializer
+ %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
%x = fadd <4 x double> %c, %i
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
ret <4 x double> %r
@@ -403,27 +461,30 @@ define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j
; 128-bit
-; CHECK-LABEL: vpaddq128_test
-; CHECK: vpaddq %xmm{{.*}}
-; CHECK: ret
define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpaddq128_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <2 x i64> %i, %j
ret <2 x i64> %x
}
-; CHECK-LABEL: vpaddq128_fold_test
-; CHECK: vpaddq (%rdi), %xmm{{.*}}
-; CHECK: ret
define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
+; CHECK-LABEL: vpaddq128_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <2 x i64>, <2 x i64>* %j, align 4
%x = add <2 x i64> %i, %tmp
ret <2 x i64> %x
}
-; CHECK-LABEL: vpaddq128_broadcast2_test
-; CHECK: vpaddq (%rdi){1to2}, %xmm{{.*}}
-; CHECK: ret
define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
+; CHECK-LABEL: vpaddq128_broadcast2_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xd4,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load i64, i64* %j
%j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0
%j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1
@@ -431,55 +492,68 @@ define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
ret <2 x i64> %x
}
-; CHECK-LABEL: vpaddd128_test
-; CHECK: vpaddd %xmm{{.*}}
-; CHECK: ret
define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpaddd128_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <4 x i32> %i, %j
ret <4 x i32> %x
}
-; CHECK-LABEL: vpaddd128_fold_test
-; CHECK: vpaddd (%rdi), %xmm{{.*}}
-; CHECK: ret
define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
+; CHECK-LABEL: vpaddd128_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load <4 x i32>, <4 x i32>* %j, align 4
%x = add <4 x i32> %i, %tmp
ret <4 x i32> %x
}
-; CHECK-LABEL: vpaddd128_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*}}
-; CHECK: ret
define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind {
+; CHECK-LABEL: vpaddd128_broadcast_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI42_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %x
}
-; CHECK-LABEL: vpaddd128_mask_test
-; CHECK: vpaddd %xmm{{.*%k[1-7].*}}
-; CHECK: ret
define <4 x i32> @vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_mask_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = add <4 x i32> %i, %j
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
ret <4 x i32> %r
}
-; CHECK-LABEL: vpaddd128_maskz_test
-; CHECK: vpaddd %xmm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_maskz_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = add <4 x i32> %i, %j
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
ret <4 x i32> %r
}
-; CHECK-LABEL: vpaddd128_mask_fold_test
-; CHECK: vpaddd (%rdi), %xmm{{.*%k[1-7]}}
-; CHECK: ret
define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_mask_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%j = load <4 x i32>, <4 x i32>* %j.ptr
%x = add <4 x i32> %i, %j
@@ -487,20 +561,27 @@ define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x
ret <4 x i32> %r
}
-; CHECK-LABEL: vpaddd128_mask_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]}}}
-; CHECK: ret
define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_mask_broadcast_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI46_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
ret <4 x i32> %r
}
-; CHECK-LABEL: vpaddd128_maskz_fold_test
-; CHECK: vpaddd (%rdi), %xmm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_maskz_fold_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%j = load <4 x i32>, <4 x i32>* %j.ptr
%x = add <4 x i32> %i, %j
@@ -508,96 +589,111 @@ define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4
ret <4 x i32> %r
}
-; CHECK-LABEL: vpaddd128_maskz_broadcast_test
-; CHECK: vpaddd LCP{{.*}}(%rip){1to4}, %xmm{{.*{%k[1-7]} {z}}}
-; CHECK: ret
define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: vpaddd128_maskz_broadcast_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfe,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI48_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = add <4 x i32> %i, <i32 1, i32 1, i32 1, i32 1>
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
ret <4 x i32> %r
}
-; CHECK-LABEL: vpsubq128_test
-; CHECK: vpsubq %xmm{{.*}}
-; CHECK: ret
define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
+; CHECK-LABEL: vpsubq128_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <2 x i64> %i, %j
ret <2 x i64> %x
}
-; CHECK-LABEL: vpsubd128_test
-; CHECK: vpsubd %xmm{{.*}}
-; CHECK: ret
define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
+; CHECK-LABEL: vpsubd128_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = sub <4 x i32> %i, %j
ret <4 x i32> %x
}
-; CHECK-LABEL: vpmulld128_test
-; CHECK: vpmulld %xmm{{.*}}
-; CHECK: ret
define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) {
+; CHECK-LABEL: vpmulld128_test:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x40,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%x = mul <4 x i32> %i, %j
ret <4 x i32> %x
}
-; CHECK-LABEL: test_vaddpd_128
-; CHECK: vaddpd{{.*}}
-; CHECK: ret
define <2 x double> @test_vaddpd_128(<2 x double> %y, <2 x double> %x) {
+; CHECK-LABEL: test_vaddpd_128:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
+; CHECK-NEXT: retq ## encoding: [0xc3]
entry:
%add.i = fadd <2 x double> %x, %y
ret <2 x double> %add.i
}
-; CHECK-LABEL: test_fold_vaddpd_128
-; CHECK: vaddpd LCP{{.*}}(%rip){{.*}}
-; CHECK: ret
define <2 x double> @test_fold_vaddpd_128(<2 x double> %y) {
+; CHECK-LABEL: test_fold_vaddpd_128:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vaddpd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 4, value: LCPI53_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
entry:
%add.i = fadd <2 x double> %y, <double 4.500000e+00, double 3.400000e+00>
ret <2 x double> %add.i
}
-; CHECK-LABEL: test_broadcast_vaddpd_128
-; CHECK: LCP{{.*}}(%rip){1to4}, %xmm0, %xmm0
-; CHECK: ret
define <4 x float> @test_broadcast_vaddpd_128(<4 x float> %a) nounwind {
+; CHECK-LABEL: test_broadcast_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x58,0x05,A,A,A,A]
+; CHECK-NEXT: ## fixup A - offset: 6, value: LCPI54_0-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: retq ## encoding: [0xc3]
%b = fadd <4 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
ret <4 x float> %b
}
-; CHECK-LABEL: test_mask_vaddps_128
-; CHECK: vaddps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i,
- <4 x float> %j, <4 x i32> %mask1)
- nounwind readnone {
+define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x58,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = fadd <4 x float> %i, %j
%r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
ret <4 x float> %r
}
-; CHECK-LABEL: test_mask_vmulps_128
-; CHECK: vmulps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i,
- <4 x float> %j, <4 x i32> %mask1)
- nounwind readnone {
+define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmulps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x59,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = fmul <4 x float> %i, %j
%r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
ret <4 x float> %r
}
-; CHECK-LABEL: test_mask_vminps_128
-; CHECK: vminps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i,
- <4 x float> %j, <4 x i32> %mask1)
- nounwind readnone {
+define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vminps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vminps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5d,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%cmp_res = fcmp olt <4 x float> %i, %j
%min = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
@@ -605,12 +701,13 @@ define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i,
ret <4 x float> %r
}
-; CHECK-LABEL: test_mask_vmaxps_128
-; CHECK: vmaxps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i,
- <4 x float> %j, <4 x i32> %mask1)
- nounwind readnone {
+define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmaxps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%cmp_res = fcmp ogt <4 x float> %i, %j
%max = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
@@ -618,12 +715,13 @@ define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i,
ret <4 x float> %r
}
-; CHECK-LABEL: test_mask_vsubps_128
-; CHECK: vsubps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i,
- <4 x float> %j, <4 x i32> %mask1)
- nounwind readnone {
+define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vsubps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5c,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = fsub <4 x float> %i, %j
%r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
@@ -631,36 +729,39 @@ define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i,
}
-; CHECK-LABEL: test_mask_vdivps_128
-; CHECK: vdivps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i,
- <4 x float> %j, <4 x i32> %mask1)
- nounwind readnone {
+define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivps_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vdivps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5e,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%x = fdiv <4 x float> %i, %j
%r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
ret <4 x float> %r
}
-; CHECK-LABEL: test_mask_vmulpd_128
-; CHECK: vmulpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i,
- <2 x double> %j, <2 x i64> %mask1)
- nounwind readnone {
+define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmulpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmulpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x59,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%x = fmul <2 x double> %i, %j
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
ret <2 x double> %r
}
-; CHECK-LABEL: test_mask_vminpd_128
-; CHECK: vminpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i,
- <2 x double> %j, <2 x i64> %mask1)
- nounwind readnone {
+define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vminpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vminpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5d,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%cmp_res = fcmp olt <2 x double> %i, %j
%min = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j
@@ -668,12 +769,13 @@ define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i,
ret <2 x double> %r
}
-; CHECK-LABEL: test_mask_vmaxpd_128
-; CHECK: vmaxpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i,
- <2 x double> %j, <2 x i64> %mask1)
- nounwind readnone {
+define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vmaxpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vmaxpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%cmp_res = fcmp ogt <2 x double> %i, %j
%max = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j
@@ -681,46 +783,52 @@ define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i,
ret <2 x double> %r
}
-; CHECK-LABEL: test_mask_vsubpd_128
-; CHECK: vsubpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i,
- <2 x double> %j, <2 x i64> %mask1)
- nounwind readnone {
+define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vsubpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vsubpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5c,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%x = fsub <2 x double> %i, %j
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
ret <2 x double> %r
}
-; CHECK-LABEL: test_mask_vdivpd_128
-; CHECK: vdivpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i,
- <2 x double> %j, <2 x i64> %mask1)
- nounwind readnone {
+define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vdivpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vdivpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5e,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%x = fdiv <2 x double> %i, %j
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
ret <2 x double> %r
}
-; CHECK-LABEL: test_mask_vaddpd_128
-; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
-; CHECK: ret
-define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i,
- <2 x double> %j, <2 x i64> %mask1)
- nounwind readnone {
+define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
+; CHECK-LABEL: test_mask_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
+; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
+; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%x = fadd <2 x double> %i, %j
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
ret <2 x double> %r
}
-; CHECK-LABEL: test_maskz_vaddpd_128
-; CHECK: vaddpd {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}}}
-; CHECK: ret
define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j,
+; CHECK-LABEL: test_maskz_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
<2 x i64> %mask1) nounwind readnone {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%x = fadd <2 x double> %i, %j
@@ -728,12 +836,13 @@ define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j,
ret <2 x double> %r
}
-; CHECK-LABEL: test_mask_fold_vaddpd_128
-; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}.*}}
-; CHECK: ret
-define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i,
- <2 x double>* %j, <2 x i64> %mask1)
- nounwind {
+define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_fold_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
+; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04]
+; CHECK-NEXT: vaddpd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%tmp = load <2 x double>, <2 x double>* %j
%x = fadd <2 x double> %i, %tmp
@@ -741,11 +850,13 @@ define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %
ret <2 x double> %r
}
-; CHECK-LABEL: test_maskz_fold_vaddpd_128
-; CHECK: vaddpd (%rdi), {{.*%xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]} {z}.*}}
-; CHECK: ret
-define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j,
- <2 x i64> %mask1) nounwind {
+define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_fold_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT: vaddpd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%tmp = load <2 x double>, <2 x double>* %j
%x = fadd <2 x double> %i, %tmp
@@ -753,10 +864,11 @@ define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %
ret <2 x double> %r
}
-; CHECK-LABEL: test_broadcast2_vaddpd_128
-; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*}}
-; CHECK: ret
define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind {
+; CHECK-LABEL: test_broadcast2_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
%j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
@@ -764,12 +876,14 @@ define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nou
ret <2 x double> %x
}
-; CHECK-LABEL: test_mask_broadcast_vaddpd_128
-; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]}.*}}
-; CHECK: ret
-define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i,
- double* %j, <2 x i64> %mask1)
- nounwind {
+define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, double* %j, <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_mask_broadcast_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
+; CHECK-NEXT: vpcmpneqq %xmm0, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xc8,0x04]
+; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm1, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x19,0x58,0x0f]
+; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
@@ -779,11 +893,13 @@ define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x doub
ret <2 x double> %r
}
-; CHECK-LABEL: test_maskz_broadcast_vaddpd_128
-; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*{%k[1-7]} {z}.*}}
-; CHECK: ret
-define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j,
- <2 x i64> %mask1) nounwind {
+define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, <2 x i64> %mask1) nounwind {
+; CHECK-LABEL: test_maskz_broadcast_vaddpd_128:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
+; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
+; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0x58,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
diff --git a/test/CodeGen/X86/branchfolding-undef.mir b/test/CodeGen/X86/branchfolding-undef.mir
index 0da167b33257..1a7dfb941875 100644
--- a/test/CodeGen/X86/branchfolding-undef.mir
+++ b/test/CodeGen/X86/branchfolding-undef.mir
@@ -16,7 +16,6 @@ name: func
tracksRegLiveness: true
body: |
bb.0:
- successors: %bb.1, %bb.2
JE_1 %bb.1, implicit undef %eflags
JMP_1 %bb.2
diff --git a/test/CodeGen/X86/build-vector-128.ll b/test/CodeGen/X86/build-vector-128.ll
new file mode 100644
index 000000000000..8c3a6790ffa6
--- /dev/null
+++ b/test/CodeGen/X86/build-vector-128.ll
@@ -0,0 +1,428 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE-32 --check-prefix=SSE2-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE-64 --check-prefix=SSE2-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE-32 --check-prefix=SSE41-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE-64 --check-prefix=SSE41-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX1-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX1-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX2-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX2-64
+
+define <2 x double> @test_buildvector_v2f64(double %a0, double %a1) {
+; SSE-32-LABEL: test_buildvector_v2f64:
+; SSE-32: # BB#0:
+; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
+; SSE-32-NEXT: retl
+;
+; SSE-64-LABEL: test_buildvector_v2f64:
+; SSE-64: # BB#0:
+; SSE-64-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v2f64:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v2f64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <2 x double> undef, double %a0, i32 0
+ %ins1 = insertelement <2 x double> %ins0, double %a1, i32 1
+ ret <2 x double> %ins1
+}
+
+define <4 x float> @test_buildvector_v4f32(float %a0, float %a1, float %a2, float %a3) {
+; SSE-32-LABEL: test_buildvector_v4f32:
+; SSE-32: # BB#0:
+; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
+; SSE-32-NEXT: retl
+;
+; SSE2-64-LABEL: test_buildvector_v4f32:
+; SSE2-64: # BB#0:
+; SSE2-64-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-64-NEXT: retq
+;
+; SSE41-64-LABEL: test_buildvector_v4f32:
+; SSE41-64: # BB#0:
+; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; SSE41-64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; SSE41-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v4f32:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v4f32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <4 x float> undef, float %a0, i32 0
+ %ins1 = insertelement <4 x float> %ins0, float %a1, i32 1
+ %ins2 = insertelement <4 x float> %ins1, float %a2, i32 2
+ %ins3 = insertelement <4 x float> %ins2, float %a3, i32 3
+ ret <4 x float> %ins3
+}
+
+define <2 x i64> @test_buildvector_v2i64(i64 %a0, i64 %a1) {
+; SSE2-32-LABEL: test_buildvector_v2i64:
+; SSE2-32: # BB#0:
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-32-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-32-NEXT: retl
+;
+; SSE-64-LABEL: test_buildvector_v2i64:
+; SSE-64: # BB#0:
+; SSE-64-NEXT: movq %rsi, %xmm1
+; SSE-64-NEXT: movq %rdi, %xmm0
+; SSE-64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-64-NEXT: retq
+;
+; SSE41-32-LABEL: test_buildvector_v2i64:
+; SSE41-32: # BB#0:
+; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrd $2, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrd $3, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: retl
+;
+; AVX-32-LABEL: test_buildvector_v2i64:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v2i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovq %rsi, %xmm0
+; AVX-64-NEXT: vmovq %rdi, %xmm1
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <2 x i64> undef, i64 %a0, i32 0
+ %ins1 = insertelement <2 x i64> %ins0, i64 %a1, i32 1
+ ret <2 x i64> %ins1
+}
+
+define <4 x i32> @test_buildvector_v4i32(i32 %f0, i32 %f1, i32 %f2, i32 %f3) {
+; SSE-32-LABEL: test_buildvector_v4i32:
+; SSE-32: # BB#0:
+; SSE-32-NEXT: movups {{[0-9]+}}(%esp), %xmm0
+; SSE-32-NEXT: retl
+;
+; SSE2-64-LABEL: test_buildvector_v4i32:
+; SSE2-64: # BB#0:
+; SSE2-64-NEXT: movd %ecx, %xmm0
+; SSE2-64-NEXT: movd %esi, %xmm1
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-64-NEXT: movd %edx, %xmm2
+; SSE2-64-NEXT: movd %edi, %xmm0
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-64-NEXT: retq
+;
+; SSE41-64-LABEL: test_buildvector_v4i32:
+; SSE41-64: # BB#0:
+; SSE41-64-NEXT: movd %edi, %xmm0
+; SSE41-64-NEXT: pinsrd $1, %esi, %xmm0
+; SSE41-64-NEXT: pinsrd $2, %edx, %xmm0
+; SSE41-64-NEXT: pinsrd $3, %ecx, %xmm0
+; SSE41-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v4i32:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v4i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovd %edi, %xmm0
+; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <4 x i32> undef, i32 %f0, i32 0
+ %ins1 = insertelement <4 x i32> %ins0, i32 %f1, i32 1
+ %ins2 = insertelement <4 x i32> %ins1, i32 %f2, i32 2
+ %ins3 = insertelement <4 x i32> %ins2, i32 %f3, i32 3
+ ret <4 x i32> %ins3
+}
+
+define <8 x i16> @test_buildvector_v8i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) {
+; SSE2-32-LABEL: test_buildvector_v8i16:
+; SSE2-32: # BB#0:
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-32-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-32-NEXT: retl
+;
+; SSE2-64-LABEL: test_buildvector_v8i16:
+; SSE2-64: # BB#0:
+; SSE2-64-NEXT: movd %ecx, %xmm0
+; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-64-NEXT: movd %r9d, %xmm1
+; SSE2-64-NEXT: movd %esi, %xmm2
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-64-NEXT: movd %edx, %xmm1
+; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-64-NEXT: movd %r8d, %xmm3
+; SSE2-64-NEXT: movd %edi, %xmm0
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-64-NEXT: retq
+;
+; SSE41-32-LABEL: test_buildvector_v8i16:
+; SSE41-32: # BB#0:
+; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-32-NEXT: pinsrw $1, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrw $2, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrw $3, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrw $4, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrw $5, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrw $6, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrw $7, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: retl
+;
+; SSE41-64-LABEL: test_buildvector_v8i16:
+; SSE41-64: # BB#0:
+; SSE41-64-NEXT: movd %edi, %xmm0
+; SSE41-64-NEXT: pinsrw $1, %esi, %xmm0
+; SSE41-64-NEXT: pinsrw $2, %edx, %xmm0
+; SSE41-64-NEXT: pinsrw $3, %ecx, %xmm0
+; SSE41-64-NEXT: pinsrw $4, %r8d, %xmm0
+; SSE41-64-NEXT: pinsrw $5, %r9d, %xmm0
+; SSE41-64-NEXT: pinsrw $6, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrw $7, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v8i16:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v8i16:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovd %edi, %xmm0
+; AVX-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0
+ %ins1 = insertelement <8 x i16> %ins0, i16 %a1, i32 1
+ %ins2 = insertelement <8 x i16> %ins1, i16 %a2, i32 2
+ %ins3 = insertelement <8 x i16> %ins2, i16 %a3, i32 3
+ %ins4 = insertelement <8 x i16> %ins3, i16 %a4, i32 4
+ %ins5 = insertelement <8 x i16> %ins4, i16 %a5, i32 5
+ %ins6 = insertelement <8 x i16> %ins5, i16 %a6, i32 6
+ %ins7 = insertelement <8 x i16> %ins6, i16 %a7, i32 7
+ ret <8 x i16> %ins7
+}
+
+define <16 x i8> @test_buildvector_v16i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15) {
+; SSE2-32-LABEL: test_buildvector_v16i8:
+; SSE2-32: # BB#0:
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-32-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-32-NEXT: retl
+;
+; SSE2-64-LABEL: test_buildvector_v16i8:
+; SSE2-64: # BB#0:
+; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-64-NEXT: movd %ecx, %xmm0
+; SSE2-64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-64-NEXT: movd %r9d, %xmm1
+; SSE2-64-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-64-NEXT: movd %esi, %xmm2
+; SSE2-64-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-64-NEXT: movd %edx, %xmm3
+; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE2-64-NEXT: movd %r8d, %xmm1
+; SSE2-64-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-64-NEXT: movd %edi, %xmm0
+; SSE2-64-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE2-64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-64-NEXT: retq
+;
+; SSE41-32-LABEL: test_buildvector_v16i8:
+; SSE41-32: # BB#0:
+; SSE41-32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $2, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $3, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $5, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $6, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $7, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $9, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $10, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $11, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $12, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $13, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $14, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: pinsrb $15, {{[0-9]+}}(%esp), %xmm0
+; SSE41-32-NEXT: retl
+;
+; SSE41-64-LABEL: test_buildvector_v16i8:
+; SSE41-64: # BB#0:
+; SSE41-64-NEXT: movd %edi, %xmm0
+; SSE41-64-NEXT: pinsrb $1, %esi, %xmm0
+; SSE41-64-NEXT: pinsrb $2, %edx, %xmm0
+; SSE41-64-NEXT: pinsrb $3, %ecx, %xmm0
+; SSE41-64-NEXT: pinsrb $4, %r8d, %xmm0
+; SSE41-64-NEXT: pinsrb $5, %r9d, %xmm0
+; SSE41-64-NEXT: pinsrb $6, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $7, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $8, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $9, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $10, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $11, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $12, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $13, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $14, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: pinsrb $15, {{[0-9]+}}(%rsp), %xmm0
+; SSE41-64-NEXT: retq
+;
+; AVX-32-LABEL: test_buildvector_v16i8:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v16i8:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovd %edi, %xmm0
+; AVX-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <16 x i8> undef, i8 %a0, i32 0
+ %ins1 = insertelement <16 x i8> %ins0, i8 %a1, i32 1
+ %ins2 = insertelement <16 x i8> %ins1, i8 %a2, i32 2
+ %ins3 = insertelement <16 x i8> %ins2, i8 %a3, i32 3
+ %ins4 = insertelement <16 x i8> %ins3, i8 %a4, i32 4
+ %ins5 = insertelement <16 x i8> %ins4, i8 %a5, i32 5
+ %ins6 = insertelement <16 x i8> %ins5, i8 %a6, i32 6
+ %ins7 = insertelement <16 x i8> %ins6, i8 %a7, i32 7
+ %ins8 = insertelement <16 x i8> %ins7, i8 %a8, i32 8
+ %ins9 = insertelement <16 x i8> %ins8, i8 %a9, i32 9
+ %ins10 = insertelement <16 x i8> %ins9, i8 %a10, i32 10
+ %ins11 = insertelement <16 x i8> %ins10, i8 %a11, i32 11
+ %ins12 = insertelement <16 x i8> %ins11, i8 %a12, i32 12
+ %ins13 = insertelement <16 x i8> %ins12, i8 %a13, i32 13
+ %ins14 = insertelement <16 x i8> %ins13, i8 %a14, i32 14
+ %ins15 = insertelement <16 x i8> %ins14, i8 %a15, i32 15
+ ret <16 x i8> %ins15
+}
diff --git a/test/CodeGen/X86/build-vector-256.ll b/test/CodeGen/X86/build-vector-256.ll
new file mode 100644
index 000000000000..1ced1fc3a382
--- /dev/null
+++ b/test/CodeGen/X86/build-vector-256.ll
@@ -0,0 +1,434 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX1-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX1-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX2-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX2-64
+
+define <4 x double> @test_buildvector_v4f64(double %a0, double %a1, double %a2, double %a3) {
+; AVX-32-LABEL: test_buildvector_v4f64:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v4f64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <4 x double> undef, double %a0, i32 0
+ %ins1 = insertelement <4 x double> %ins0, double %a1, i32 1
+ %ins2 = insertelement <4 x double> %ins1, double %a2, i32 2
+ %ins3 = insertelement <4 x double> %ins2, double %a3, i32 3
+ ret <4 x double> %ins3
+}
+
+define <8 x float> @test_buildvector_v8f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7) {
+; AVX-32-LABEL: test_buildvector_v8f32:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v8f32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; AVX-64-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <8 x float> undef, float %a0, i32 0
+ %ins1 = insertelement <8 x float> %ins0, float %a1, i32 1
+ %ins2 = insertelement <8 x float> %ins1, float %a2, i32 2
+ %ins3 = insertelement <8 x float> %ins2, float %a3, i32 3
+ %ins4 = insertelement <8 x float> %ins3, float %a4, i32 4
+ %ins5 = insertelement <8 x float> %ins4, float %a5, i32 5
+ %ins6 = insertelement <8 x float> %ins5, float %a6, i32 6
+ %ins7 = insertelement <8 x float> %ins6, float %a7, i32 7
+ ret <8 x float> %ins7
+}
+
+define <4 x i64> @test_buildvector_v4i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3) {
+; AVX1-32-LABEL: test_buildvector_v4i64:
+; AVX1-32: # BB#0:
+; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-32-NEXT: retl
+;
+; AVX1-64-LABEL: test_buildvector_v4i64:
+; AVX1-64: # BB#0:
+; AVX1-64-NEXT: vmovq %rcx, %xmm0
+; AVX1-64-NEXT: vmovq %rdx, %xmm1
+; AVX1-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-64-NEXT: vmovq %rsi, %xmm1
+; AVX1-64-NEXT: vmovq %rdi, %xmm2
+; AVX1-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-32-LABEL: test_buildvector_v4i64:
+; AVX2-32: # BB#0:
+; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: test_buildvector_v4i64:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vmovq %rcx, %xmm0
+; AVX2-64-NEXT: vmovq %rdx, %xmm1
+; AVX2-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-64-NEXT: vmovq %rsi, %xmm1
+; AVX2-64-NEXT: vmovq %rdi, %xmm2
+; AVX2-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-64-NEXT: retq
+ %ins0 = insertelement <4 x i64> undef, i64 %a0, i32 0
+ %ins1 = insertelement <4 x i64> %ins0, i64 %a1, i32 1
+ %ins2 = insertelement <4 x i64> %ins1, i64 %a2, i32 2
+ %ins3 = insertelement <4 x i64> %ins2, i64 %a3, i32 3
+ ret <4 x i64> %ins3
+}
+
+define <8 x i32> @test_buildvector_v8i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7) {
+; AVX-32-LABEL: test_buildvector_v8i32:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %ymm0
+; AVX-32-NEXT: retl
+;
+; AVX1-64-LABEL: test_buildvector_v8i32:
+; AVX1-64: # BB#0:
+; AVX1-64-NEXT: vmovd %edi, %xmm0
+; AVX1-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX1-64-NEXT: vmovd %r8d, %xmm1
+; AVX1-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-64-LABEL: test_buildvector_v8i32:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vmovd %edi, %xmm0
+; AVX2-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX2-64-NEXT: vmovd %r8d, %xmm1
+; AVX2-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-64-NEXT: retq
+ %ins0 = insertelement <8 x i32> undef, i32 %a0, i32 0
+ %ins1 = insertelement <8 x i32> %ins0, i32 %a1, i32 1
+ %ins2 = insertelement <8 x i32> %ins1, i32 %a2, i32 2
+ %ins3 = insertelement <8 x i32> %ins2, i32 %a3, i32 3
+ %ins4 = insertelement <8 x i32> %ins3, i32 %a4, i32 4
+ %ins5 = insertelement <8 x i32> %ins4, i32 %a5, i32 5
+ %ins6 = insertelement <8 x i32> %ins5, i32 %a6, i32 6
+ %ins7 = insertelement <8 x i32> %ins6, i32 %a7, i32 7
+ ret <8 x i32> %ins7
+}
+
+define <16 x i16> @test_buildvector_v16i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15) {
+; AVX1-32-LABEL: test_buildvector_v16i16:
+; AVX1-32: # BB#0:
+; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-32-NEXT: retl
+;
+; AVX1-64-LABEL: test_buildvector_v16i16:
+; AVX1-64: # BB#0:
+; AVX1-64-NEXT: vmovd %edi, %xmm0
+; AVX1-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-32-LABEL: test_buildvector_v16i16:
+; AVX2-32: # BB#0:
+; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: test_buildvector_v16i16:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vmovd %edi, %xmm0
+; AVX2-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-64-NEXT: retq
+ %ins0 = insertelement <16 x i16> undef, i16 %a0, i32 0
+ %ins1 = insertelement <16 x i16> %ins0, i16 %a1, i32 1
+ %ins2 = insertelement <16 x i16> %ins1, i16 %a2, i32 2
+ %ins3 = insertelement <16 x i16> %ins2, i16 %a3, i32 3
+ %ins4 = insertelement <16 x i16> %ins3, i16 %a4, i32 4
+ %ins5 = insertelement <16 x i16> %ins4, i16 %a5, i32 5
+ %ins6 = insertelement <16 x i16> %ins5, i16 %a6, i32 6
+ %ins7 = insertelement <16 x i16> %ins6, i16 %a7, i32 7
+ %ins8 = insertelement <16 x i16> %ins7, i16 %a8, i32 8
+ %ins9 = insertelement <16 x i16> %ins8, i16 %a9, i32 9
+ %ins10 = insertelement <16 x i16> %ins9, i16 %a10, i32 10
+ %ins11 = insertelement <16 x i16> %ins10, i16 %a11, i32 11
+ %ins12 = insertelement <16 x i16> %ins11, i16 %a12, i32 12
+ %ins13 = insertelement <16 x i16> %ins12, i16 %a13, i32 13
+ %ins14 = insertelement <16 x i16> %ins13, i16 %a14, i32 14
+ %ins15 = insertelement <16 x i16> %ins14, i16 %a15, i32 15
+ ret <16 x i16> %ins15
+}
+
+define <32 x i8> @test_buildvector_v32i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31) {
+; AVX1-32-LABEL: test_buildvector_v32i8:
+; AVX1-32: # BB#0:
+; AVX1-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX1-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-32-NEXT: retl
+;
+; AVX1-64-LABEL: test_buildvector_v32i8:
+; AVX1-64: # BB#0:
+; AVX1-64-NEXT: vmovd %edi, %xmm0
+; AVX1-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX1-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-32-LABEL: test_buildvector_v32i8:
+; AVX2-32: # BB#0:
+; AVX2-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX2-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX2-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: test_buildvector_v32i8:
+; AVX2-64: # BB#0:
+; AVX2-64-NEXT: vmovd %edi, %xmm0
+; AVX2-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX2-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX2-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX2-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-64-NEXT: retq
+ %ins0 = insertelement <32 x i8> undef, i8 %a0, i32 0
+ %ins1 = insertelement <32 x i8> %ins0, i8 %a1, i32 1
+ %ins2 = insertelement <32 x i8> %ins1, i8 %a2, i32 2
+ %ins3 = insertelement <32 x i8> %ins2, i8 %a3, i32 3
+ %ins4 = insertelement <32 x i8> %ins3, i8 %a4, i32 4
+ %ins5 = insertelement <32 x i8> %ins4, i8 %a5, i32 5
+ %ins6 = insertelement <32 x i8> %ins5, i8 %a6, i32 6
+ %ins7 = insertelement <32 x i8> %ins6, i8 %a7, i32 7
+ %ins8 = insertelement <32 x i8> %ins7, i8 %a8, i32 8
+ %ins9 = insertelement <32 x i8> %ins8, i8 %a9, i32 9
+ %ins10 = insertelement <32 x i8> %ins9, i8 %a10, i32 10
+ %ins11 = insertelement <32 x i8> %ins10, i8 %a11, i32 11
+ %ins12 = insertelement <32 x i8> %ins11, i8 %a12, i32 12
+ %ins13 = insertelement <32 x i8> %ins12, i8 %a13, i32 13
+ %ins14 = insertelement <32 x i8> %ins13, i8 %a14, i32 14
+ %ins15 = insertelement <32 x i8> %ins14, i8 %a15, i32 15
+ %ins16 = insertelement <32 x i8> %ins15, i8 %a16, i32 16
+ %ins17 = insertelement <32 x i8> %ins16, i8 %a17, i32 17
+ %ins18 = insertelement <32 x i8> %ins17, i8 %a18, i32 18
+ %ins19 = insertelement <32 x i8> %ins18, i8 %a19, i32 19
+ %ins20 = insertelement <32 x i8> %ins19, i8 %a20, i32 20
+ %ins21 = insertelement <32 x i8> %ins20, i8 %a21, i32 21
+ %ins22 = insertelement <32 x i8> %ins21, i8 %a22, i32 22
+ %ins23 = insertelement <32 x i8> %ins22, i8 %a23, i32 23
+ %ins24 = insertelement <32 x i8> %ins23, i8 %a24, i32 24
+ %ins25 = insertelement <32 x i8> %ins24, i8 %a25, i32 25
+ %ins26 = insertelement <32 x i8> %ins25, i8 %a26, i32 26
+ %ins27 = insertelement <32 x i8> %ins26, i8 %a27, i32 27
+ %ins28 = insertelement <32 x i8> %ins27, i8 %a28, i32 28
+ %ins29 = insertelement <32 x i8> %ins28, i8 %a29, i32 29
+ %ins30 = insertelement <32 x i8> %ins29, i8 %a30, i32 30
+ %ins31 = insertelement <32 x i8> %ins30, i8 %a31, i32 31
+ ret <32 x i8> %ins31
+}
diff --git a/test/CodeGen/X86/build-vector-512.ll b/test/CodeGen/X86/build-vector-512.ll
new file mode 100644
index 000000000000..21737cca93a1
--- /dev/null
+++ b/test/CodeGen/X86/build-vector-512.ll
@@ -0,0 +1,712 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX512F-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX512F-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-32 --check-prefix=AVX512BW-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX-64 --check-prefix=AVX512BW-64
+
+define <8 x double> @test_buildvector_v8f64(double %a0, double %a1, double %a2, double %a3, double %a4, double %a5, double %a6, double %a7) {
+; AVX-32-LABEL: test_buildvector_v8f64:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v8f64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0]
+; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; AVX-64-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-64-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <8 x double> undef, double %a0, i32 0
+ %ins1 = insertelement <8 x double> %ins0, double %a1, i32 1
+ %ins2 = insertelement <8 x double> %ins1, double %a2, i32 2
+ %ins3 = insertelement <8 x double> %ins2, double %a3, i32 3
+ %ins4 = insertelement <8 x double> %ins3, double %a4, i32 4
+ %ins5 = insertelement <8 x double> %ins4, double %a5, i32 5
+ %ins6 = insertelement <8 x double> %ins5, double %a6, i32 6
+ %ins7 = insertelement <8 x double> %ins6, double %a7, i32 7
+ ret <8 x double> %ins7
+}
+
+define <16 x float> @test_buildvector_v16f32(float %a0, float %a1, float %a2, float %a3, float %a4, float %a5, float %a6, float %a7, float %a8, float %a9, float %a10, float %a11, float %a12, float %a13, float %a14, float %a15) {
+; AVX-32-LABEL: test_buildvector_v16f32:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v16f32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; AVX-64-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX-64-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[2,3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
+; AVX-64-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
+; AVX-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
+; AVX-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <16 x float> undef, float %a0, i32 0
+ %ins1 = insertelement <16 x float> %ins0, float %a1, i32 1
+ %ins2 = insertelement <16 x float> %ins1, float %a2, i32 2
+ %ins3 = insertelement <16 x float> %ins2, float %a3, i32 3
+ %ins4 = insertelement <16 x float> %ins3, float %a4, i32 4
+ %ins5 = insertelement <16 x float> %ins4, float %a5, i32 5
+ %ins6 = insertelement <16 x float> %ins5, float %a6, i32 6
+ %ins7 = insertelement <16 x float> %ins6, float %a7, i32 7
+ %ins8 = insertelement <16 x float> %ins7, float %a8, i32 8
+ %ins9 = insertelement <16 x float> %ins8, float %a9, i32 9
+ %ins10 = insertelement <16 x float> %ins9, float %a10, i32 10
+ %ins11 = insertelement <16 x float> %ins10, float %a11, i32 11
+ %ins12 = insertelement <16 x float> %ins11, float %a12, i32 12
+ %ins13 = insertelement <16 x float> %ins12, float %a13, i32 13
+ %ins14 = insertelement <16 x float> %ins13, float %a14, i32 14
+ %ins15 = insertelement <16 x float> %ins14, float %a15, i32 15
+ ret <16 x float> %ins15
+}
+
+define <8 x i64> @test_buildvector_v8i64(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7) {
+; AVX-32-LABEL: test_buildvector_v8i64:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-32-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX-32-NEXT: vpinsrd $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX-32-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v8i64:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovq %rcx, %xmm0
+; AVX-64-NEXT: vmovq %rdx, %xmm1
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-64-NEXT: vmovq %rsi, %xmm1
+; AVX-64-NEXT: vmovq %rdi, %xmm2
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX-64-NEXT: vmovq %r9, %xmm1
+; AVX-64-NEXT: vmovq %r8, %xmm2
+; AVX-64-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-64-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1
+; AVX-64-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <8 x i64> undef, i64 %a0, i32 0
+ %ins1 = insertelement <8 x i64> %ins0, i64 %a1, i32 1
+ %ins2 = insertelement <8 x i64> %ins1, i64 %a2, i32 2
+ %ins3 = insertelement <8 x i64> %ins2, i64 %a3, i32 3
+ %ins4 = insertelement <8 x i64> %ins3, i64 %a4, i32 4
+ %ins5 = insertelement <8 x i64> %ins4, i64 %a5, i32 5
+ %ins6 = insertelement <8 x i64> %ins5, i64 %a6, i32 6
+ %ins7 = insertelement <8 x i64> %ins6, i64 %a7, i32 7
+ ret <8 x i64> %ins7
+}
+
+define <16 x i32> @test_buildvector_v16i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) {
+; AVX-32-LABEL: test_buildvector_v16i32:
+; AVX-32: # BB#0:
+; AVX-32-NEXT: vmovups {{[0-9]+}}(%esp), %zmm0
+; AVX-32-NEXT: retl
+;
+; AVX-64-LABEL: test_buildvector_v16i32:
+; AVX-64: # BB#0:
+; AVX-64-NEXT: vmovd %edi, %xmm0
+; AVX-64-NEXT: vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrd $2, %edx, %xmm0, %xmm0
+; AVX-64-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
+; AVX-64-NEXT: vmovd %r8d, %xmm1
+; AVX-64-NEXT: vpinsrd $1, %r9d, %xmm1, %xmm1
+; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-64-NEXT: vpinsrd $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX-64-NEXT: vpinsrd $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX-64-NEXT: vpinsrd $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX-64-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX-64-NEXT: retq
+ %ins0 = insertelement <16 x i32> undef, i32 %a0, i32 0
+ %ins1 = insertelement <16 x i32> %ins0, i32 %a1, i32 1
+ %ins2 = insertelement <16 x i32> %ins1, i32 %a2, i32 2
+ %ins3 = insertelement <16 x i32> %ins2, i32 %a3, i32 3
+ %ins4 = insertelement <16 x i32> %ins3, i32 %a4, i32 4
+ %ins5 = insertelement <16 x i32> %ins4, i32 %a5, i32 5
+ %ins6 = insertelement <16 x i32> %ins5, i32 %a6, i32 6
+ %ins7 = insertelement <16 x i32> %ins6, i32 %a7, i32 7
+ %ins8 = insertelement <16 x i32> %ins7, i32 %a8, i32 8
+ %ins9 = insertelement <16 x i32> %ins8, i32 %a9, i32 9
+ %ins10 = insertelement <16 x i32> %ins9, i32 %a10, i32 10
+ %ins11 = insertelement <16 x i32> %ins10, i32 %a11, i32 11
+ %ins12 = insertelement <16 x i32> %ins11, i32 %a12, i32 12
+ %ins13 = insertelement <16 x i32> %ins12, i32 %a13, i32 13
+ %ins14 = insertelement <16 x i32> %ins13, i32 %a14, i32 14
+ %ins15 = insertelement <16 x i32> %ins14, i32 %a15, i32 15
+ ret <16 x i32> %ins15
+}
+
+define <32 x i16> @test_buildvector_v32i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7, i16 %a8, i16 %a9, i16 %a10, i16 %a11, i16 %a12, i16 %a13, i16 %a14, i16 %a15, i16 %a16, i16 %a17, i16 %a18, i16 %a19, i16 %a20, i16 %a21, i16 %a22, i16 %a23, i16 %a24, i16 %a25, i16 %a26, i16 %a27, i16 %a28, i16 %a29, i16 %a30, i16 %a31) {
+; AVX512F-32-LABEL: test_buildvector_v32i16:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512F-32-NEXT: retl
+;
+; AVX512F-64-LABEL: test_buildvector_v32i16:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-64-NEXT: vmovd %edi, %xmm0
+; AVX512F-64-NEXT: vpinsrw $1, %esi, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $2, %edx, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512F-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-64-NEXT: retq
+;
+; AVX512BW-32-LABEL: test_buildvector_v32i16:
+; AVX512BW-32: # BB#0:
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrw $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrw $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrw $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrw $4, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrw $5, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrw $6, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrw $7, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-32-NEXT: retl
+;
+; AVX512BW-64-LABEL: test_buildvector_v32i16:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-64-NEXT: vmovd %edi, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $1, %esi, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $2, %edx, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $3, %ecx, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $4, %r8d, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $5, %r9d, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512BW-64-NEXT: vpinsrw $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrw $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrw $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrw $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrw $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrw $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrw $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-64-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-64-NEXT: retq
+ %ins0 = insertelement <32 x i16> undef, i16 %a0, i32 0
+ %ins1 = insertelement <32 x i16> %ins0, i16 %a1, i32 1
+ %ins2 = insertelement <32 x i16> %ins1, i16 %a2, i32 2
+ %ins3 = insertelement <32 x i16> %ins2, i16 %a3, i32 3
+ %ins4 = insertelement <32 x i16> %ins3, i16 %a4, i32 4
+ %ins5 = insertelement <32 x i16> %ins4, i16 %a5, i32 5
+ %ins6 = insertelement <32 x i16> %ins5, i16 %a6, i32 6
+ %ins7 = insertelement <32 x i16> %ins6, i16 %a7, i32 7
+ %ins8 = insertelement <32 x i16> %ins7, i16 %a8, i32 8
+ %ins9 = insertelement <32 x i16> %ins8, i16 %a9, i32 9
+ %ins10 = insertelement <32 x i16> %ins9, i16 %a10, i32 10
+ %ins11 = insertelement <32 x i16> %ins10, i16 %a11, i32 11
+ %ins12 = insertelement <32 x i16> %ins11, i16 %a12, i32 12
+ %ins13 = insertelement <32 x i16> %ins12, i16 %a13, i32 13
+ %ins14 = insertelement <32 x i16> %ins13, i16 %a14, i32 14
+ %ins15 = insertelement <32 x i16> %ins14, i16 %a15, i32 15
+ %ins16 = insertelement <32 x i16> %ins15, i16 %a16, i32 16
+ %ins17 = insertelement <32 x i16> %ins16, i16 %a17, i32 17
+ %ins18 = insertelement <32 x i16> %ins17, i16 %a18, i32 18
+ %ins19 = insertelement <32 x i16> %ins18, i16 %a19, i32 19
+ %ins20 = insertelement <32 x i16> %ins19, i16 %a20, i32 20
+ %ins21 = insertelement <32 x i16> %ins20, i16 %a21, i32 21
+ %ins22 = insertelement <32 x i16> %ins21, i16 %a22, i32 22
+ %ins23 = insertelement <32 x i16> %ins22, i16 %a23, i32 23
+ %ins24 = insertelement <32 x i16> %ins23, i16 %a24, i32 24
+ %ins25 = insertelement <32 x i16> %ins24, i16 %a25, i32 25
+ %ins26 = insertelement <32 x i16> %ins25, i16 %a26, i32 26
+ %ins27 = insertelement <32 x i16> %ins26, i16 %a27, i32 27
+ %ins28 = insertelement <32 x i16> %ins27, i16 %a28, i32 28
+ %ins29 = insertelement <32 x i16> %ins28, i16 %a29, i32 29
+ %ins30 = insertelement <32 x i16> %ins29, i16 %a30, i32 30
+ %ins31 = insertelement <32 x i16> %ins30, i16 %a31, i32 31
+ ret <32 x i16> %ins31
+}
+
+define <64 x i8> @test_buildvector_v64i8(i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7, i8 %a8, i8 %a9, i8 %a10, i8 %a11, i8 %a12, i8 %a13, i8 %a14, i8 %a15, i8 %a16, i8 %a17, i8 %a18, i8 %a19, i8 %a20, i8 %a21, i8 %a22, i8 %a23, i8 %a24, i8 %a25, i8 %a26, i8 %a27, i8 %a28, i8 %a29, i8 %a30, i8 %a31, i8 %a32, i8 %a33, i8 %a34, i8 %a35, i8 %a36, i8 %a37, i8 %a38, i8 %a39, i8 %a40, i8 %a41, i8 %a42, i8 %a43, i8 %a44, i8 %a45, i8 %a46, i8 %a47, i8 %a48, i8 %a49, i8 %a50, i8 %a51, i8 %a52, i8 %a53, i8 %a54, i8 %a55, i8 %a56, i8 %a57, i8 %a58, i8 %a59, i8 %a60, i8 %a61, i8 %a62, i8 %a63) {
+; AVX512F-32-LABEL: test_buildvector_v64i8:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512F-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512F-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512F-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512F-32-NEXT: retl
+;
+; AVX512F-64-LABEL: test_buildvector_v64i8:
+; AVX512F-64: # BB#0:
+; AVX512F-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512F-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-64-NEXT: vmovd %edi, %xmm0
+; AVX512F-64-NEXT: vpinsrb $1, %esi, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $4, %r8d, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $5, %r9d, %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512F-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512F-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512F-64-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-64-NEXT: retq
+;
+; AVX512BW-32-LABEL: test_buildvector_v64i8:
+; AVX512BW-32: # BB#0:
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm1, %xmm1
+; AVX512BW-32-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512BW-32-NEXT: vpinsrb $1, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $2, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $3, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $5, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $6, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $7, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $9, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $10, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $11, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $12, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $13, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $14, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vpinsrb $15, {{[0-9]+}}(%esp), %xmm2, %xmm2
+; AVX512BW-32-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-32-NEXT: retl
+;
+; AVX512BW-64-LABEL: test_buildvector_v64i8:
+; AVX512BW-64: # BB#0:
+; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm0, %xmm0
+; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-64-NEXT: vmovd %edi, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $1, %esi, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $2, %edx, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $3, %ecx, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $4, %r8d, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $5, %r9d, %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm1, %xmm1
+; AVX512BW-64-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX512BW-64-NEXT: vpinsrb $1, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $2, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $3, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $4, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $5, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $6, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $7, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $8, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $9, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $10, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $11, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $12, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $13, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $14, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vpinsrb $15, {{[0-9]+}}(%rsp), %xmm2, %xmm2
+; AVX512BW-64-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-64-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-64-NEXT: retq
+ %ins0 = insertelement <64 x i8> undef, i8 %a0, i32 0
+ %ins1 = insertelement <64 x i8> %ins0, i8 %a1, i32 1
+ %ins2 = insertelement <64 x i8> %ins1, i8 %a2, i32 2
+ %ins3 = insertelement <64 x i8> %ins2, i8 %a3, i32 3
+ %ins4 = insertelement <64 x i8> %ins3, i8 %a4, i32 4
+ %ins5 = insertelement <64 x i8> %ins4, i8 %a5, i32 5
+ %ins6 = insertelement <64 x i8> %ins5, i8 %a6, i32 6
+ %ins7 = insertelement <64 x i8> %ins6, i8 %a7, i32 7
+ %ins8 = insertelement <64 x i8> %ins7, i8 %a8, i32 8
+ %ins9 = insertelement <64 x i8> %ins8, i8 %a9, i32 9
+ %ins10 = insertelement <64 x i8> %ins9, i8 %a10, i32 10
+ %ins11 = insertelement <64 x i8> %ins10, i8 %a11, i32 11
+ %ins12 = insertelement <64 x i8> %ins11, i8 %a12, i32 12
+ %ins13 = insertelement <64 x i8> %ins12, i8 %a13, i32 13
+ %ins14 = insertelement <64 x i8> %ins13, i8 %a14, i32 14
+ %ins15 = insertelement <64 x i8> %ins14, i8 %a15, i32 15
+ %ins16 = insertelement <64 x i8> %ins15, i8 %a16, i32 16
+ %ins17 = insertelement <64 x i8> %ins16, i8 %a17, i32 17
+ %ins18 = insertelement <64 x i8> %ins17, i8 %a18, i32 18
+ %ins19 = insertelement <64 x i8> %ins18, i8 %a19, i32 19
+ %ins20 = insertelement <64 x i8> %ins19, i8 %a20, i32 20
+ %ins21 = insertelement <64 x i8> %ins20, i8 %a21, i32 21
+ %ins22 = insertelement <64 x i8> %ins21, i8 %a22, i32 22
+ %ins23 = insertelement <64 x i8> %ins22, i8 %a23, i32 23
+ %ins24 = insertelement <64 x i8> %ins23, i8 %a24, i32 24
+ %ins25 = insertelement <64 x i8> %ins24, i8 %a25, i32 25
+ %ins26 = insertelement <64 x i8> %ins25, i8 %a26, i32 26
+ %ins27 = insertelement <64 x i8> %ins26, i8 %a27, i32 27
+ %ins28 = insertelement <64 x i8> %ins27, i8 %a28, i32 28
+ %ins29 = insertelement <64 x i8> %ins28, i8 %a29, i32 29
+ %ins30 = insertelement <64 x i8> %ins29, i8 %a30, i32 30
+ %ins31 = insertelement <64 x i8> %ins30, i8 %a31, i32 31
+ %ins32 = insertelement <64 x i8> %ins31, i8 %a32, i32 32
+ %ins33 = insertelement <64 x i8> %ins32, i8 %a33, i32 33
+ %ins34 = insertelement <64 x i8> %ins33, i8 %a34, i32 34
+ %ins35 = insertelement <64 x i8> %ins34, i8 %a35, i32 35
+ %ins36 = insertelement <64 x i8> %ins35, i8 %a36, i32 36
+ %ins37 = insertelement <64 x i8> %ins36, i8 %a37, i32 37
+ %ins38 = insertelement <64 x i8> %ins37, i8 %a38, i32 38
+ %ins39 = insertelement <64 x i8> %ins38, i8 %a39, i32 39
+ %ins40 = insertelement <64 x i8> %ins39, i8 %a40, i32 40
+ %ins41 = insertelement <64 x i8> %ins40, i8 %a41, i32 41
+ %ins42 = insertelement <64 x i8> %ins41, i8 %a42, i32 42
+ %ins43 = insertelement <64 x i8> %ins42, i8 %a43, i32 43
+ %ins44 = insertelement <64 x i8> %ins43, i8 %a44, i32 44
+ %ins45 = insertelement <64 x i8> %ins44, i8 %a45, i32 45
+ %ins46 = insertelement <64 x i8> %ins45, i8 %a46, i32 46
+ %ins47 = insertelement <64 x i8> %ins46, i8 %a47, i32 47
+ %ins48 = insertelement <64 x i8> %ins47, i8 %a48, i32 48
+ %ins49 = insertelement <64 x i8> %ins48, i8 %a49, i32 49
+ %ins50 = insertelement <64 x i8> %ins49, i8 %a50, i32 50
+ %ins51 = insertelement <64 x i8> %ins50, i8 %a51, i32 51
+ %ins52 = insertelement <64 x i8> %ins51, i8 %a52, i32 52
+ %ins53 = insertelement <64 x i8> %ins52, i8 %a53, i32 53
+ %ins54 = insertelement <64 x i8> %ins53, i8 %a54, i32 54
+ %ins55 = insertelement <64 x i8> %ins54, i8 %a55, i32 55
+ %ins56 = insertelement <64 x i8> %ins55, i8 %a56, i32 56
+ %ins57 = insertelement <64 x i8> %ins56, i8 %a57, i32 57
+ %ins58 = insertelement <64 x i8> %ins57, i8 %a58, i32 58
+ %ins59 = insertelement <64 x i8> %ins58, i8 %a59, i32 59
+ %ins60 = insertelement <64 x i8> %ins59, i8 %a60, i32 60
+ %ins61 = insertelement <64 x i8> %ins60, i8 %a61, i32 61
+ %ins62 = insertelement <64 x i8> %ins61, i8 %a62, i32 62
+ %ins63 = insertelement <64 x i8> %ins62, i8 %a63, i32 63
+ ret <64 x i8> %ins63
+}
diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll
index ac8f790a2ead..887abe99f6ed 100644
--- a/test/CodeGen/X86/combine-abs.ll
+++ b/test/CodeGen/X86/combine-abs.ll
@@ -1,5 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
; fold (abs c1) -> c2
define <4 x i32> @combine_v4i32_abs_constant() {
@@ -27,10 +29,10 @@ define <8 x i16> @combine_v8i16_abs_abs(<8 x i16> %a) {
; CHECK-NEXT: vpabsw %xmm0, %xmm0
; CHECK-NEXT: retq
%a1 = call <8 x i16> @llvm.x86.ssse3.pabs.w.128(<8 x i16> %a)
- %n2 = sub <8 x i16> zeroinitializer, %a1
- %c2 = icmp slt <8 x i16> %a1, zeroinitializer
- %a2 = select <8 x i1> %c2, <8 x i16> %n2, <8 x i16> %a1
- ret <8 x i16> %a2
+ %s2 = ashr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %a2 = add <8 x i16> %a1, %s2
+ %x2 = xor <8 x i16> %a2, %s2
+ ret <8 x i16> %x2
}
define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
@@ -46,17 +48,29 @@ define <32 x i8> @combine_v32i8_abs_abs(<32 x i8> %a) {
}
define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
-; CHECK-LABEL: combine_v4i64_abs_abs:
-; CHECK: # BB#0:
-; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1
-; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
-; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpsrad $31, %ymm0, %ymm1
-; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
-; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: retq
+; AVX2-LABEL: combine_v4i64_abs_abs:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1
+; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm1
+; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[1,1,3,3,5,5,7,7]
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: combine_v4i64_abs_abs:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: vpabsq %zmm0, %zmm0
+; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: combine_v4i64_abs_abs:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpabsq %ymm0, %ymm0
+; AVX512VL-NEXT: retq
%n1 = sub <4 x i64> zeroinitializer, %a
%b1 = icmp slt <4 x i64> %a, zeroinitializer
%a1 = select <4 x i1> %b1, <4 x i64> %n1, <4 x i64> %a
diff --git a/test/CodeGen/X86/commuted-blend-mask.ll b/test/CodeGen/X86/commuted-blend-mask.ll
index e6322cbb7a14..37830509d5a2 100644
--- a/test/CodeGen/X86/commuted-blend-mask.ll
+++ b/test/CodeGen/X86/commuted-blend-mask.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s
; When commuting the operands of a SSE blend, make sure that the resulting blend
; mask can be encoded as a imm8.
@@ -7,7 +8,7 @@
; pblendw $4294967103, %xmm1, %xmm0
define <4 x i32> @test(<4 x i32> %a, <4 x i32> %b) {
- ;CHECK: pblendw $63, %xmm1, %xmm0
+; CHECK: pblendw $63, %xmm1, %xmm0
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 6, i32 3>
ret <4 x i32> %shuffle
}
diff --git a/test/CodeGen/X86/ctpop-combine.ll b/test/CodeGen/X86/ctpop-combine.ll
index b7031a817e82..bbfc2ead04c6 100644
--- a/test/CodeGen/X86/ctpop-combine.ll
+++ b/test/CodeGen/X86/ctpop-combine.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=corei7 | FileCheck %s
+declare i8 @llvm.ctpop.i8(i8) nounwind readnone
declare i64 @llvm.ctpop.i64(i64) nounwind readnone
define i32 @test1(i64 %x) nounwind readnone {
@@ -48,3 +49,16 @@ define i32 @test3(i64 %x) nounwind readnone {
%conv = zext i1 %cmp to i32
ret i32 %conv
}
+
+define i8 @test4(i8 %x) nounwind readnone {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0:
+; CHECK-NEXT: andl $127, %edi
+; CHECK-NEXT: popcntw %di, %ax
+; CHECK-NEXT: # kill: %AL<def> %AL<kill> %AX<kill>
+; CHECK-NEXT: retq
+ %x2 = and i8 %x, 127
+ %count = tail call i8 @llvm.ctpop.i8(i8 %x2)
+ %and = and i8 %count, 7
+ ret i8 %and
+}
diff --git a/test/CodeGen/X86/dbg-baseptr.ll b/test/CodeGen/X86/dbg-baseptr.ll
index f69c78af7367..fb0da1b50d11 100644
--- a/test/CodeGen/X86/dbg-baseptr.ll
+++ b/test/CodeGen/X86/dbg-baseptr.ll
@@ -16,12 +16,12 @@ define i32 @f0(%struct.s* byval align 8 %input) !dbg !8 {
; CHECK-LABEL: f1:
; CHECK: DEBUG_VALUE: f:input <- [%RBP+16]
-define i32 @f1(%struct.s* byval align 8 %input) !dbg !8 {
+define i32 @f1(%struct.s* byval align 8 %input) !dbg !19 {
%val = load i64, i64* @glob
; this alloca should force FP usage.
%stackspace = alloca i32, i64 %val, align 1
store i32* %stackspace, i32** @ptr
- call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18
+ call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !20, metadata !17), !dbg !21
ret i32 42
}
@@ -37,11 +37,11 @@ define i32 @f1(%struct.s* byval align 8 %input) !dbg !8 {
; The parameter should still be referenced through RBP though.
; CHECK-NOT: DEBUG_VALUE: f:input <- [%RBX
; CHECK: DEBUG_VALUE: f:input <- [%RBP+16]
-define i32 @f2(%struct.s* byval align 8 %input) !dbg !8 {
+define i32 @f2(%struct.s* byval align 8 %input) !dbg !22 {
%val = load i64, i64* @glob
%stackspace = alloca i32, i64 %val, align 64
store i32* %stackspace, i32** @ptr
- call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !4, metadata !17), !dbg !18
+ call void @llvm.dbg.declare(metadata %struct.s* %input, metadata !23, metadata !17), !dbg !24
ret i32 42
}
@@ -73,3 +73,10 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!17 = !DIExpression()
!18 = !DILocation(line: 5, scope: !8)
+
+!19 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
+!20 = !DILocalVariable(name: "input", arg: 1, scope: !19, file: !3, line: 5, type: !9)
+!21 = !DILocation(line: 5, scope: !19)
+!22 = distinct !DISubprogram(name: "f", file: !3, line: 5, type: !6, isLocal: false, isDefinition: true, flags: DIFlagPrototyped, unit: !2, variables: !5)
+!23 = !DILocalVariable(name: "input", arg: 1, scope: !22, file: !3, line: 5, type: !9)
+!24 = !DILocation(line: 5, scope: !22)
diff --git a/test/CodeGen/X86/eflags-copy-expansion.mir b/test/CodeGen/X86/eflags-copy-expansion.mir
index 36044b4d2059..28f47c3c2496 100644
--- a/test/CodeGen/X86/eflags-copy-expansion.mir
+++ b/test/CodeGen/X86/eflags-copy-expansion.mir
@@ -25,7 +25,6 @@ liveins:
body: |
bb.0.entry:
liveins: %edi
- successors: %bb.1.false
NOOP implicit-def %al
; The bug was triggered only when LivePhysReg is used, which
diff --git a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
index e86d094ac341..f9ecf707810b 100644
--- a/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
+++ b/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll
@@ -35,8 +35,8 @@ define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) {
; CHECK-NEXT: pop
; CHECK-NEXT: ret
-define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !4 {
- call void @llvm.dbg.declare(metadata %struct.Buffer* %p1, metadata !5, metadata !6), !dbg !7
+define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 {
+ call void @llvm.dbg.declare(metadata %struct.Buffer* %p1, metadata !9, metadata !6), !dbg !10
ret void
}
@@ -64,3 +64,6 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!5 = !DILocalVariable(name: "w", scope: !4)
!6 = !DIExpression()
!7 = !DILocation(line: 210, column: 12, scope: !4)
+!8 = distinct !DISubprogram(name: "withDebug", unit: !0)
+!9 = !DILocalVariable(name: "w", scope: !8)
+!10 = !DILocation(line: 210, column: 12, scope: !8)
diff --git a/test/CodeGen/X86/implicit-null-checks.mir b/test/CodeGen/X86/implicit-null-checks.mir
index 39bfedaa7814..d0ba057fa009 100644
--- a/test/CodeGen/X86/implicit-null-checks.mir
+++ b/test/CodeGen/X86/implicit-null-checks.mir
@@ -384,14 +384,12 @@ liveins:
body: |
bb.0.entry:
- successors: %bb.3.is_null, %bb.1.not_null
liveins: %esi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
JE_1 %bb.3.is_null, implicit %eflags
bb.1.not_null:
- successors: %bb.4.ret_100, %bb.2.ret_200
liveins: %esi, %rdi
%eax = MOV32ri 2200000
@@ -427,7 +425,6 @@ liveins:
body: |
bb.0.entry:
- successors: %bb.3.is_null, %bb.1.not_null
liveins: %esi, %rdi, %rdx
%eax = MOV32rm killed %rdx, 1, _, 0, _ :: (volatile load 4 from %ir.ptr)
@@ -435,7 +432,6 @@ body: |
JE_1 %bb.3.is_null, implicit %eflags
bb.1.not_null:
- successors: %bb.4.ret_100, %bb.2.ret_200
liveins: %esi, %rdi
%eax = MOV32ri 2200000
@@ -444,7 +440,6 @@ body: |
JE_1 %bb.4.ret_100, implicit %eflags
bb.2.ret_200:
- successors: %bb.3.is_null
%eax = MOV32ri 200
@@ -472,14 +467,12 @@ liveins:
body: |
bb.0.entry:
- successors: %bb.3.is_null, %bb.1.not_null
liveins: %esi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
JE_1 %bb.3.is_null, implicit %eflags
bb.1.not_null:
- successors: %bb.4.ret_100, %bb.2.ret_200
liveins: %esi, %rdi
%eax = MOV32ri 2200000
@@ -515,14 +508,12 @@ liveins:
body: |
bb.0.entry:
- successors: %bb.3.is_null, %bb.1.not_null
liveins: %rsi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
JE_1 %bb.3.is_null, implicit %eflags
bb.1.not_null:
- successors: %bb.4.ret_100, %bb.2.ret_200
liveins: %rsi, %rdi
%rdi = MOV64ri 5000
@@ -557,14 +548,12 @@ liveins:
body: |
bb.0.entry:
- successors: %bb.3.is_null, %bb.1.not_null
liveins: %rsi, %rdi, %rdx
TEST64rr %rdi, %rdi, implicit-def %eflags
JE_1 %bb.3.is_null, implicit %eflags
bb.1.not_null:
- successors: %bb.4.ret_100, %bb.2.ret_200
liveins: %rsi, %rdi, %rdx
%rbx = MOV64rr %rdx
@@ -603,7 +592,6 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
# CHECK: CALL64pcrel32
body: |
bb.0.entry:
- successors: %bb.2.leave, %bb.1.stay
liveins: %rdi, %rbx
frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
@@ -645,7 +633,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -680,7 +667,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -712,7 +698,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000)
liveins: %rsi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -745,7 +730,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.1.is_null(0x30000000), %bb.2.not_null(0x50000000)
liveins: %rsi, %rdi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -779,7 +763,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -810,7 +793,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -842,7 +824,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -874,7 +855,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -910,7 +890,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -941,7 +920,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -974,7 +952,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1006,7 +983,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1042,7 +1018,6 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
'%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rbx
frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
@@ -1082,7 +1057,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1116,7 +1090,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1149,7 +1122,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1182,7 +1154,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1214,7 +1185,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1246,7 +1216,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
@@ -1279,7 +1248,6 @@ liveins:
- { reg: '%rsi' }
body: |
bb.0.entry:
- successors: %bb.2.is_null, %bb.1.not_null
liveins: %rdi, %rsi
TEST64rr %rdi, %rdi, implicit-def %eflags
diff --git a/test/CodeGen/X86/invalid-liveness.mir b/test/CodeGen/X86/invalid-liveness.mir
index ca862472ba86..c1da65e0be69 100644
--- a/test/CodeGen/X86/invalid-liveness.mir
+++ b/test/CodeGen/X86/invalid-liveness.mir
@@ -16,12 +16,10 @@ registers:
- { id: 0, class: gr32 }
body: |
bb.0:
- successors: %bb.2, %bb.3
JG_1 %bb.2, implicit %eflags
JMP_1 %bb.3
bb.2:
- successors: %bb.3
%0 = IMPLICIT_DEF
JMP_1 %bb.3
diff --git a/test/CodeGen/X86/machine-region-info.mir b/test/CodeGen/X86/machine-region-info.mir
index 0998fe97c235..78823a3eb006 100644
--- a/test/CodeGen/X86/machine-region-info.mir
+++ b/test/CodeGen/X86/machine-region-info.mir
@@ -4,67 +4,48 @@
name: fun
body: |
bb.0:
- successors: %bb.1, %bb.7
-
CMP32ri8 %edi, 40, implicit-def %eflags
JNE_1 %bb.7, implicit killed %eflags
JMP_1 %bb.1
bb.1:
- successors: %bb.2, %bb.11
-
CMP32ri8 %edi, 1, implicit-def %eflags
JNE_1 %bb.11, implicit killed %eflags
JMP_1 %bb.2
bb.2:
- successors: %bb.3, %bb.5
-
CMP32ri8 %edi, 2, implicit-def %eflags
JNE_1 %bb.5, implicit killed %eflags
JMP_1 %bb.3
bb.3:
- successors: %bb.4, %bb.5
-
CMP32ri8 %edi, 90, implicit-def %eflags
JNE_1 %bb.5, implicit killed %eflags
JMP_1 %bb.4
bb.4:
- successors: %bb.5
bb.5:
- successors: %bb.6, %bb.11
-
CMP32ri8 %edi, 4, implicit-def %eflags
JNE_1 %bb.11, implicit killed %eflags
JMP_1 %bb.6
bb.6:
- successors: %bb.11
-
JMP_1 %bb.11
bb.7:
- successors: %bb.9, %bb.8
-
CMP32ri8 %edi, 5, implicit-def %eflags
JE_1 %bb.9, implicit killed %eflags
JMP_1 %bb.8
bb.8:
- successors: %bb.9
bb.9:
- successors: %bb.11, %bb.10
-
CMP32ri8 %edi, 6, implicit-def %eflags
JE_1 %bb.11, implicit killed %eflags
JMP_1 %bb.10
bb.10:
- successors: %bb.11
bb.11:
RET 0
@@ -74,10 +55,10 @@ body: |
# CHECK: Region tree:
# CHECK-NEXT: [0] BB#0 => <Function Return>
# CHECK-NEXT: [1] BB#0 => BB#11
+# CHECK-NEXT: [2] BB#7 => BB#9
+# CHECK-NEXT: [2] BB#9 => BB#11
# CHECK-NEXT: [2] BB#1 => BB#11
# CHECK-NEXT: [3] BB#2 => BB#5
# CHECK-NEXT: [4] BB#3 => BB#5
# CHECK-NEXT: [3] BB#5 => BB#11
-# CHECK-NEXT: [2] BB#7 => BB#9
-# CHECK-NEXT: [2] BB#9 => BB#11
# CHECK-NEXT: End region tree
diff --git a/test/CodeGen/X86/ms-inline-asm-avx512.ll b/test/CodeGen/X86/ms-inline-asm-avx512.ll
new file mode 100644
index 000000000000..be60f5bca161
--- /dev/null
+++ b/test/CodeGen/X86/ms-inline-asm-avx512.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s | FileCheck %s
+
+; Generated from clang/test/CodeGen/ms-inline-asm-avx512.c
+
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-windows-msvc"
+
+; Function Attrs: noinline nounwind
+define void @ignore_fe_size() #0 {
+entry:
+ %c = alloca i8, align 1
+ call void asm sideeffect inteldialect "vaddps xmm1, xmm2, $1{1to4}\0A\09vaddps xmm1, xmm2, $2\0A\09mov eax, $3\0A\09mov $0, rax", "=*m,*m,*m,*m,~{eax},~{xmm1},~{dirflag},~{fpsr},~{flags}"(i8* %c, i8* %c, i8* %c, i8* %c) #1
+ ret void
+}
+
+; CHECK-LABEL: ignore_fe_size:
+; CHECK: vaddps 7(%rsp){1to4}, %xmm2, %xmm1
+; CHECK: vaddps 7(%rsp), %xmm2, %xmm1
+; CHECK: movl 7(%rsp), %eax
+; CHECK: movq %rax, 7(%rsp)
+; CHECK: retq
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
diff --git a/test/CodeGen/X86/pr27681.mir b/test/CodeGen/X86/pr27681.mir
index 3e931b182e4e..002761bc1e68 100644
--- a/test/CodeGen/X86/pr27681.mir
+++ b/test/CodeGen/X86/pr27681.mir
@@ -25,7 +25,6 @@ stack:
- { id: 2, type: spill-slot, offset: -32, size: 4, alignment: 4 }
body: |
bb.0:
- successors: %bb.1
liveins: %ebp, %ebx, %edi, %esi
frame-setup PUSH32r killed %ebp, implicit-def %esp, implicit %esp
@@ -41,7 +40,6 @@ body: |
%edx = MOV32ri 6
bb.1:
- successors: %bb.3, %bb.2
liveins: %eax, %ebp, %ebx, %ecx, %edi, %edx
%ebp = SHR32rCL killed %ebp, implicit-def dead %eflags, implicit %cl
@@ -66,7 +64,6 @@ body: |
JE_1 %bb.3, implicit %eflags
bb.2:
- successors: %bb.3
liveins: %cl, %eax, %ebp, %esi
OR32mr %esp, 1, _, 8, _, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1)
diff --git a/test/CodeGen/X86/pr32907.ll b/test/CodeGen/X86/pr32907.ll
new file mode 100644
index 000000000000..bc03fbe06843
--- /dev/null
+++ b/test/CodeGen/X86/pr32907.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
+
+define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
+; SSE-LABEL: PR32907:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: psubq %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psubq %xmm0, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pandn %xmm0, %xmm2
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: PR32907:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm2
+; AVX2-NEXT: vpandn %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: PR32907:
+; AVX512: # BB#0: # %entry
+; AVX512-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $63, %zmm0, %zmm1
+; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm2
+; AVX512-NEXT: vpandn %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %sub13.i = sub <2 x i64> %astype.i, %astype6.i
+ %x.lobit.i.i = ashr <2 x i64> %sub13.i, <i64 63, i64 63>
+ %sub.i.i = sub <2 x i64> zeroinitializer, %sub13.i
+ %0 = xor <2 x i64> %x.lobit.i.i, <i64 -1, i64 -1>
+ %1 = and <2 x i64> %sub13.i, %0
+ %2 = and <2 x i64> %x.lobit.i.i, %sub.i.i
+ %cond.i.i = or <2 x i64> %1, %2
+ ret <2 x i64> %cond.i.i
+}
diff --git a/test/CodeGen/X86/pre-coalesce.mir b/test/CodeGen/X86/pre-coalesce.mir
index 11805fe090b4..17d447dd097b 100644
--- a/test/CodeGen/X86/pre-coalesce.mir
+++ b/test/CodeGen/X86/pre-coalesce.mir
@@ -83,8 +83,6 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- successors: %bb.4(0x30000000), %bb.1.while.body.preheader(0x50000000)
-
%0 = MOV64rm %rip, 1, _, @b, _ :: (dereferenceable load 8 from @b)
%12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
TEST8rr %12, %12, implicit-def %eflags
@@ -92,17 +90,12 @@ body: |
JNE_1 %bb.1.while.body.preheader, implicit killed %eflags
bb.4:
- successors: %bb.3.while.end(0x80000000)
-
%10 = COPY %11
JMP_1 %bb.3.while.end
bb.1.while.body.preheader:
- successors: %bb.2.while.body(0x80000000)
bb.2.while.body:
- successors: %bb.3.while.end(0x04000000), %bb.2.while.body(0x7c000000)
-
%8 = MOVSX32rr8 %12
%10 = COPY %11
%10 = SHL32ri %10, 5, implicit-def dead %eflags
diff --git a/test/CodeGen/X86/regcall-no-plt.ll b/test/CodeGen/X86/regcall-no-plt.ll
new file mode 100644
index 000000000000..d525448b60ca
--- /dev/null
+++ b/test/CodeGen/X86/regcall-no-plt.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-freebsd -relocation-model=pic < %s | FileCheck %s
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; According to x86-64 psABI, xmm0-xmm7 can be used to pass function parameters.
+;; However regcall calling convention uses also xmm8-xmm15 to pass function
+;; parameters which violates x86-64 psABI.
+;; Detail info about it can be found at:
+;; https://sourceware.org/bugzilla/show_bug.cgi?id=21265
+;;
+;; We encounter the violation symptom when using PIC with lazy binding
+;; optimization.
+;; In that case the PLT mechanism as described in x86_64 psABI will
+;; not preserve xmm8-xmm15 registers and will lead to miscompilation.
+;;
+;; The agreed solution is to disable PLT for regcall calling convention for
+;; SystemV using ELF format.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+declare void @lazy()
+declare x86_regcallcc void @regcall_not_lazy()
+
+; CHECK-LABEL: foo:
+; CHECK: callq lazy@PLT
+; CHECK: callq *regcall_not_lazy@GOTPCREL(%rip)
+define void @foo() nounwind {
+ call void @lazy()
+ call void @regcall_not_lazy()
+ ret void
+}
+
+; CHECK-LABEL: tail_call_regcall:
+; CHECK: jmpq *regcall_not_lazy@GOTPCREL(%rip)
+define void @tail_call_regcall() nounwind {
+ tail call void @regcall_not_lazy()
+ ret void
+}
+
+; CHECK-LABEL: tail_call_regular:
+; CHECK: jmp lazy
+define void @tail_call_regular() nounwind {
+ tail call void @lazy()
+ ret void
+}
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index d053c63dcdb3..a3ba58975800 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -392,8 +392,10 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BW-LABEL: shuffle_v32i16_to_v8i16:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
+; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1
+; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
; AVX512BW-NEXT: vmovd %xmm2, %eax
; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
@@ -416,8 +418,10 @@ define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16:
; AVX512BWVL: # BB#0:
; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1
+; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
; AVX512BWVL-NEXT: vmovd %xmm2, %eax
; AVX512BWVL-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/stack-folding-int-avx512.ll b/test/CodeGen/X86/stack-folding-int-avx512.ll
index 04a7d1159014..38e19efb7132 100644
--- a/test/CodeGen/X86/stack-folding-int-avx512.ll
+++ b/test/CodeGen/X86/stack-folding-int-avx512.ll
@@ -204,8 +204,8 @@ define <64 x i8> @stack_fold_pabsb_maskz(<64 x i8> %a0, i64 %mask) {
}
define <16 x i32> @stack_fold_pabsd(<16 x i32> %a0) {
- ;check-label: stack_fold_pabsd
- ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte folded reload
+ ;CHECK-LABEL: stack_fold_pabsd
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
%2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> undef, i16 -1)
ret <16 x i32> %2
@@ -213,16 +213,16 @@ define <16 x i32> @stack_fold_pabsd(<16 x i32> %a0) {
declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16) nounwind readnone
define <16 x i32> @stack_fold_pabsd_mask(<16 x i32> %passthru, <16 x i32> %a0, i16 %mask) {
- ;check-label: stack_fold_pabsd
- ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte folded reload
+ ;CHECK-LABEL: stack_fold_pabsd_mask
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
%2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask)
ret <16 x i32> %2
}
define <16 x i32> @stack_fold_pabsd_maskz(<16 x i32> %a0, i16 %mask) {
- ;check-label: stack_fold_pabsd
- ;check: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte folded reload
+ ;CHECK-LABEL: stack_fold_pabsd_maskz
+ ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
%2 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %a0, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %2
diff --git a/test/CodeGen/X86/vec_partial.ll b/test/CodeGen/X86/vec_partial.ll
index e5ac81add7f6..ee15c2af6dd2 100644
--- a/test/CodeGen/X86/vec_partial.ll
+++ b/test/CodeGen/X86/vec_partial.ll
@@ -1,12 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
; PR11580
define <3 x float> @addf3(<3 x float> %x) {
-; CHECK-LABEL: addf3:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: addps {{.*}}(%rip), %xmm0
-; CHECK-NEXT: retq
+; X86-LABEL: addf3:
+; X86: # BB#0: # %entry
+; X86-NEXT: addps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: addf3:
+; X64: # BB#0: # %entry
+; X64-NEXT: addps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
entry:
%add = fadd <3 x float> %x, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
ret <3 x float> %add
@@ -14,9 +20,13 @@ entry:
; PR11580
define <4 x float> @cvtf3_f4(<3 x float> %x) {
-; CHECK-LABEL: cvtf3_f4:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: retq
+; X86-LABEL: cvtf3_f4:
+; X86: # BB#0: # %entry
+; X86-NEXT: retl
+;
+; X64-LABEL: cvtf3_f4:
+; X64: # BB#0: # %entry
+; X64-NEXT: retq
entry:
%extractVec = shufflevector <3 x float> %x, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
ret <4 x float> %extractVec
@@ -24,9 +34,13 @@ entry:
; PR11580
define <3 x float> @cvtf4_f3(<4 x float> %x) {
-; CHECK-LABEL: cvtf4_f3:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: retq
+; X86-LABEL: cvtf4_f3:
+; X86: # BB#0: # %entry
+; X86-NEXT: retl
+;
+; X64-LABEL: cvtf4_f3:
+; X64: # BB#0: # %entry
+; X64-NEXT: retq
entry:
%extractVec = shufflevector <4 x float> %x, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
ret <3 x float> %extractVec
diff --git a/test/CodeGen/X86/vec_reassociate.ll b/test/CodeGen/X86/vec_reassociate.ll
index 0d3373528f58..5234b0c8a77c 100644
--- a/test/CodeGen/X86/vec_reassociate.ll
+++ b/test/CodeGen/X86/vec_reassociate.ll
@@ -1,10 +1,17 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64
define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @add_4i32
- ;CHECK: # BB#0:
- ;CHECK-NEXT: paddd %xmm1, %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: add_4i32:
+; X86: # BB#0:
+; X86-NEXT: paddd %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: add_4i32:
+; X64: # BB#0:
+; X64-NEXT: paddd %xmm1, %xmm0
+; X64-NEXT: retq
%1 = add <4 x i32> %a0, <i32 1, i32 -2, i32 3, i32 -4>
%2 = add <4 x i32> %a1, <i32 -1, i32 2, i32 -3, i32 4>
%3 = add <4 x i32> %1, %2
@@ -12,10 +19,15 @@ define <4 x i32> @add_4i32(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @add_4i32_commute
- ;CHECK: # BB#0:
- ;CHECK-NEXT: paddd %xmm1, %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: add_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: paddd %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: add_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: paddd %xmm1, %xmm0
+; X64-NEXT: retq
%1 = add <4 x i32> <i32 1, i32 -2, i32 3, i32 -4>, %a0
%2 = add <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, %a1
%3 = add <4 x i32> %1, %2
@@ -23,11 +35,17 @@ define <4 x i32> @add_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @mul_4i32
- ;CHECK: # BB#0:
- ;CHECK-NEXT: pmulld %xmm1, %xmm0
- ;CHECK-NEXT: pmulld .LCPI2_0(%rip), %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: mul_4i32:
+; X86: # BB#0:
+; X86-NEXT: pmulld %xmm1, %xmm0
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_4i32:
+; X64: # BB#0:
+; X64-NEXT: pmulld %xmm1, %xmm0
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 3, i32 4>
%2 = mul <4 x i32> %a1, <i32 4, i32 3, i32 2, i32 1>
%3 = mul <4 x i32> %1, %2
@@ -35,11 +53,17 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @mul_4i32_commute
- ;CHECK: # BB#0:
- ;CHECK-NEXT: pmulld %xmm1, %xmm0
- ;CHECK-NEXT: pmulld .LCPI3_0(%rip), %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: mul_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: pmulld %xmm1, %xmm0
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: pmulld %xmm1, %xmm0
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%1 = mul <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %a0
%2 = mul <4 x i32> <i32 4, i32 3, i32 2, i32 1>, %a1
%3 = mul <4 x i32> %1, %2
@@ -47,11 +71,17 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @and_4i32
- ;CHECK: # BB#0:
- ;CHECK-NEXT: andps %xmm1, %xmm0
- ;CHECK-NEXT: andps .LCPI4_0(%rip), %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: and_4i32:
+; X86: # BB#0:
+; X86-NEXT: andps %xmm1, %xmm0
+; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: and_4i32:
+; X64: # BB#0:
+; X64-NEXT: andps %xmm1, %xmm0
+; X64-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%1 = and <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3>
%2 = and <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1>
%3 = and <4 x i32> %1, %2
@@ -59,11 +89,17 @@ define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @and_4i32_commute
- ;CHECK: # BB#0:
- ;CHECK-NEXT: andps %xmm1, %xmm0
- ;CHECK-NEXT: andps .LCPI5_0(%rip), %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: and_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: andps %xmm1, %xmm0
+; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: and_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: andps %xmm1, %xmm0
+; X64-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%1 = and <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
%2 = and <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1
%3 = and <4 x i32> %1, %2
@@ -71,11 +107,17 @@ define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @or_4i32
- ;CHECK: # BB#0:
- ;CHECK-NEXT: orps %xmm1, %xmm0
- ;CHECK-NEXT: orps .LCPI6_0(%rip), %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: or_4i32:
+; X86: # BB#0:
+; X86-NEXT: orps %xmm1, %xmm0
+; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: or_4i32:
+; X64: # BB#0:
+; X64-NEXT: orps %xmm1, %xmm0
+; X64-NEXT: orps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%1 = or <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3>
%2 = or <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1>
%3 = or <4 x i32> %1, %2
@@ -83,23 +125,35 @@ define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @or_4i32_commute
- ;CHECK: # BB#0:
- ;CHECK-NEXT: orps %xmm1, %xmm0
- ;CHECK-NEXT: orps .LCPI7_0(%rip), %xmm0
- ;CHECK-NEXT: retq
- %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
+; X86-LABEL: or_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: orps %xmm1, %xmm0
+; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: or_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: orps %xmm1, %xmm0
+; X64-NEXT: orps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+ %1 = or <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
%2 = or <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1
%3 = or <4 x i32> %1, %2
ret <4 x i32> %3
}
define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @xor_4i32
- ;CHECK: # BB#0:
- ;CHECK-NEXT: xorps %xmm1, %xmm0
- ;CHECK-NEXT: xorps .LCPI8_0(%rip), %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: xor_4i32:
+; X86: # BB#0:
+; X86-NEXT: xorps %xmm1, %xmm0
+; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: xor_4i32:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm1, %xmm0
+; X64-NEXT: xorps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%1 = xor <4 x i32> %a0, <i32 -2, i32 -2, i32 3, i32 3>
%2 = xor <4 x i32> %a1, <i32 -1, i32 -1, i32 1, i32 1>
%3 = xor <4 x i32> %1, %2
@@ -107,11 +161,17 @@ define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
}
define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
- ;CHECK-LABEL: @xor_4i32_commute
- ;CHECK: # BB#0:
- ;CHECK-NEXT: xorps %xmm1, %xmm0
- ;CHECK-NEXT: xorps .LCPI9_0(%rip), %xmm0
- ;CHECK-NEXT: retq
+; X86-LABEL: xor_4i32_commute:
+; X86: # BB#0:
+; X86-NEXT: xorps %xmm1, %xmm0
+; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: xor_4i32_commute:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm1, %xmm0
+; X64-NEXT: xorps {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
%1 = xor <4 x i32> <i32 -2, i32 -2, i32 3, i32 3>, %a0
%2 = xor <4 x i32> <i32 -1, i32 -1, i32 1, i32 1>, %a1
%3 = xor <4 x i32> %1, %2
diff --git a/test/CodeGen/X86/vector-lzcnt-512.ll b/test/CodeGen/X86/vector-lzcnt-512.ll
index 79d133bbfb8f..88378eb51a27 100644
--- a/test/CodeGen/X86/vector-lzcnt-512.ll
+++ b/test/CodeGen/X86/vector-lzcnt-512.ll
@@ -1,39 +1,337 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,-avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CD
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512CDBW
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=-avx512cd,-avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512DQ
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
-; ALL-LABEL: testv8i64:
-; ALL: ## BB#0:
-; ALL-NEXT: vplzcntq %zmm0, %zmm0
-; ALL-NEXT: retq
+; AVX512CD-LABEL: testv8i64:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CD-NEXT: retq
+;
+; AVX512CDBW-LABEL: testv8i64:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
+; AVX512BW-LABEL: testv8i64:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $16, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv8i64:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $4, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $8, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $16, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $32, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: retq
%out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 0)
ret <8 x i64> %out
}
define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
-; ALL-LABEL: testv8i64u:
-; ALL: ## BB#0:
-; ALL-NEXT: vplzcntq %zmm0, %zmm0
-; ALL-NEXT: retq
+; AVX512CD-LABEL: testv8i64u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CD-NEXT: retq
+;
+; AVX512CDBW-LABEL: testv8i64u:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
+; AVX512BW-LABEL: testv8i64u:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrlq $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $16, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv8i64u:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrlq $1, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $2, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $4, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $8, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $16, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrlq $32, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: retq
%out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 -1)
ret <8 x i64> %out
}
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
-; ALL-LABEL: testv16i32:
-; ALL: ## BB#0:
-; ALL-NEXT: vplzcntd %zmm0, %zmm0
-; ALL-NEXT: retq
+; AVX512CD-LABEL: testv16i32:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: retq
+;
+; AVX512CDBW-LABEL: testv16i32:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
+; AVX512BW-LABEL: testv16i32:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv16i32:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1
+; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $4, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: retq
%out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 0)
ret <16 x i32> %out
}
define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
-; ALL-LABEL: testv16i32u:
-; ALL: ## BB#0:
-; ALL-NEXT: vplzcntd %zmm0, %zmm0
-; ALL-NEXT: retq
+; AVX512CD-LABEL: testv16i32u:
+; AVX512CD: ## BB#0:
+; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CD-NEXT: retq
+;
+; AVX512CDBW-LABEL: testv16i32u:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
+; AVX512BW-LABEL: testv16i32u:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpsrld $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm2 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
+; AVX512BW-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv16i32u:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpsrld $1, %zmm0, %zmm1
+; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $2, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $4, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $8, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpsrld $16, %zmm0, %zmm1
+; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm5 = ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[6],ymm3[6],ymm1[7],ymm3[7]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[4],ymm3[4],ymm1[5],ymm3[5]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackuswb %ymm5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[6],ymm3[6],ymm0[7],ymm3[7]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[4],ymm3[4],ymm0[5],ymm3[5]
+; AVX512DQ-NEXT: vpsadbw %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: retq
%out = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %in, i1 -1)
ret <16 x i32> %out
}
@@ -52,20 +350,78 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512CD-NEXT: retq
;
+; AVX512CDBW-LABEL: testv32i16:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512CDBW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
; AVX512BW-LABEL: testv32i16:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv32i16:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpxor %ymm6, %ymm6, %ymm6
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm5, %ymm7
+; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddb %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm5
+; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm2, %ymm5
+; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: retq
%out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 0)
ret <32 x i16> %out
}
@@ -84,20 +440,78 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
; AVX512CD-NEXT: vpsubw %ymm2, %ymm1, %ymm1
; AVX512CD-NEXT: retq
;
+; AVX512CDBW-LABEL: testv32i16u:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512CDBW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
+; AVX512CDBW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512CDBW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
+; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
; AVX512BW-LABEL: testv32i16u:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; AVX512BW-NEXT: vpsubw %ymm2, %ymm1, %ymm1
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: vpsubw %ymm2, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv32i16u:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpxor %ymm6, %ymm6, %ymm6
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm5, %ymm7
+; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm5, %ymm4, %ymm5
+; AVX512DQ-NEXT: vpaddb %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm5
+; AVX512DQ-NEXT: vpand %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm2, %ymm5
+; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm2, %ymm4, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpcmpeqb %ymm6, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: retq
%out = call <32 x i16> @llvm.ctlz.v32i16(<32 x i16> %in, i1 -1)
ret <32 x i16> %out
}
@@ -128,32 +542,78 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512CD-NEXT: retq
;
+; AVX512CDBW-LABEL: testv64i8:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512CDBW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512CDBW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
; AVX512BW-LABEL: testv64i8:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandnq %zmm2, %zmm0, %zmm1
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm1, %zmm3, %zmm1
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv64i8:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpxor %ymm5, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm6
+; AVX512DQ-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: retq
%out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
ret <64 x i8> %out
}
@@ -184,32 +644,78 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512CD-NEXT: retq
;
+; AVX512CDBW-LABEL: testv64i8u:
+; AVX512CDBW: ## BB#0:
+; AVX512CDBW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512CDBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512CDBW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512CDBW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpmovdb %zmm2, %xmm2
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX512CDBW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512CDBW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX512CDBW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512CDBW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512CDBW-NEXT: retq
+;
; AVX512BW-LABEL: testv64i8u:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm1, %xmm1
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovdb %zmm2, %xmm2
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512BW-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512BW-NEXT: vpsubb %xmm3, %xmm0, %xmm0
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandnq %zmm2, %zmm0, %zmm1
+; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX512BW-NEXT: vpshufb %zmm1, %zmm3, %zmm1
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpxorq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: testv64i8u:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpxor %ymm5, %ymm5, %ymm5
+; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm0, %ymm6
+; AVX512DQ-NEXT: vpand %ymm6, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpshufb %ymm3, %ymm4, %ymm3
+; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpcmpeqb %ymm5, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpand %ymm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm4, %ymm1
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: retq
%out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
ret <64 x i8> %out
}
diff --git a/test/CodeGen/X86/vector-shuffle-variable-128.ll b/test/CodeGen/X86/vector-shuffle-variable-128.ll
index 87fd4a7bf6b9..bde8a16d2a5a 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -1303,70 +1303,39 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
; SSE41-NEXT: andl $7, %r8d
; SSE41-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: andl $7, %r9d
-; SSE41-NEXT: movzwl -40(%rsp,%rdi,2), %eax
-; SSE41-NEXT: movd %eax, %xmm1
-; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm1
-; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm1
-; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm1
-; SSE41-NEXT: pinsrw $4, -40(%rsp,%r8,2), %xmm1
-; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; SSE41-NEXT: pinsrw $0, -40(%rsp,%rdi,2), %xmm0
+; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm0
+; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm0
+; SSE41-NEXT: pinsrw $3, -24(%rsp,%rcx,2), %xmm0
+; SSE41-NEXT: pinsrw $4, -40(%rsp,%r8,2), %xmm0
+; SSE41-NEXT: pinsrw $5, -24(%rsp,%r9,2), %xmm0
; SSE41-NEXT: retq
;
-; AVX1-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; AVX1-NEXT: andl $7, %edi
-; AVX1-NEXT: andl $7, %esi
-; AVX1-NEXT: andl $7, %edx
-; AVX1-NEXT: andl $7, %ecx
-; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: andl $7, %r8d
-; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: andl $7, %r9d
-; AVX1-NEXT: movzwl -40(%rsp,%rdi,2), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
-; AVX1-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
-; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
-; AVX1-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0
-; AVX1-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
-; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; AVX2-NEXT: andl $7, %edi
-; AVX2-NEXT: andl $7, %esi
-; AVX2-NEXT: andl $7, %edx
-; AVX2-NEXT: andl $7, %ecx
-; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: andl $7, %r8d
-; AVX2-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: andl $7, %r9d
-; AVX2-NEXT: movzwl -40(%rsp,%rdi,2), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
-; AVX2-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
-; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
-; AVX2-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0
-; AVX2-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
-; AVX2-NEXT: retq
+; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
+; AVX: # BB#0:
+; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
+; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
+; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: andl $7, %edi
+; AVX-NEXT: andl $7, %esi
+; AVX-NEXT: andl $7, %edx
+; AVX-NEXT: andl $7, %ecx
+; AVX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $7, %r8d
+; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $7, %r9d
+; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $0, -40(%rsp,%rdi,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $1, -24(%rsp,%rsi,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $4, -40(%rsp,%r8,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $5, -24(%rsp,%r9,2), %xmm0, %xmm0
+; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %x, i16 %i0
%y1 = extractelement <8 x i16> %y, i16 %i1
%x2 = extractelement <8 x i16> %x, i16 %i2
diff --git a/test/CodeGen/X86/win64_eh_leaf.ll b/test/CodeGen/X86/win64_eh_leaf.ll
index 21a423ab36a9..35d55a907375 100644
--- a/test/CodeGen/X86/win64_eh_leaf.ll
+++ b/test/CodeGen/X86/win64_eh_leaf.ll
@@ -29,3 +29,12 @@ entry:
; and no unwind info in the object file.
; READOBJ-NOT: leaf_func
}
+
+define void @naked_func() naked {
+ call void asm sideeffect "ret", ""()
+ unreachable
+}
+; ASM-LABEL: naked_func:
+; ASM-NOT: .seh_
+; ASM: ret
+; ASM-NOT: .seh_
diff --git a/test/CodeGen/X86/xray-attribute-instrumentation.ll b/test/CodeGen/X86/xray-attribute-instrumentation.ll
index c52ccf9356bc..7c60327d2c30 100644
--- a/test/CodeGen/X86/xray-attribute-instrumentation.ll
+++ b/test/CodeGen/X86/xray-attribute-instrumentation.ll
@@ -15,10 +15,17 @@ define i32 @foo() nounwind noinline uwtable "function-instrument"="xray-always"
}
; CHECK: .p2align 4, 0x90
; CHECK-NEXT: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_0
; CHECK-NEXT: .section {{.*}}xray_instr_map
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .quad {{.*}}xray_sled_0
; CHECK: .quad {{.*}}xray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_end0
+
; We test multiple returns in a single function to make sure we're getting all
; of them with XRay instrumentation.
@@ -46,8 +53,14 @@ NotEqual:
}
; CHECK: .p2align 4, 0x90
; CHECK-NEXT: .quad {{.*}}xray_synthetic_1
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_1
; CHECK-NEXT: .section {{.*}}xray_instr_map
; CHECK-LABEL: Lxray_synthetic_1:
; CHECK: .quad {{.*}}xray_sled_2
; CHECK: .quad {{.*}}xray_sled_3
; CHECK: .quad {{.*}}xray_sled_4
+; CHECK-LABEL: Lxray_synthetic_end1:
+; CHECK: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_1:
+; CHECK: .quad {{.*}}xray_synthetic_1
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_end1
diff --git a/test/CodeGen/X86/xray-custom-log.ll b/test/CodeGen/X86/xray-custom-log.ll
new file mode 100644
index 000000000000..63625d44b4cb
--- /dev/null
+++ b/test/CodeGen/X86/xray-custom-log.ll
@@ -0,0 +1,23 @@
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+define i32 @fn() nounwind noinline uwtable "function-instrument"="xray-always" {
+ %eventptr = alloca i8
+ %eventsize = alloca i32
+ store i32 3, i32* %eventsize
+ %val = load i32, i32* %eventsize
+ call void @llvm.xray.customevent(i8* %eventptr, i32 %val)
+ ; CHECK-LABEL: Lxray_event_sled_0:
+ ; CHECK-NEXT: .ascii "\353\024
+ ; CHECK-NEXT: pushq %rax
+ ; CHECK-NEXT: movq {{.*}}, %rdi
+ ; CHECK-NEXT: movq {{.*}}, %rsi
+ ; CHECK-NEXT: movabsq $__xray_CustomEvent, %rax
+ ; CHECK-NEXT: callq *%rax
+ ; CHECK-NEXT: popq %rax
+ ret i32 0
+}
+; CHECK: .section {{.*}}xray_instr_map
+; CHECK-LABEL: Lxray_synthetic_0:
+; CHECK: .quad {{.*}}xray_event_sled_0
+
+declare void @llvm.xray.customevent(i8*, i32)
diff --git a/test/CodeGen/X86/xray-loop-detection.ll b/test/CodeGen/X86/xray-loop-detection.ll
new file mode 100644
index 000000000000..3cd6b4aa6f8c
--- /dev/null
+++ b/test/CodeGen/X86/xray-loop-detection.ll
@@ -0,0 +1,23 @@
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -filetype=asm -o - -mtriple=x86_64-darwin-unknown < %s | FileCheck %s
+
+define i32 @foo(i32 %i) nounwind noinline uwtable "xray-instruction-threshold"="1" {
+entry:
+ br label %Test
+Test:
+ %indvar = phi i32 [0, %entry], [%nextindvar, %Inc]
+ %cond = icmp eq i32 %indvar, %i
+ br i1 %cond, label %Exit, label %Inc
+Inc:
+ %nextindvar = add i32 %indvar, 1
+ br label %Test
+Exit:
+ %retval = phi i32 [%indvar, %Test]
+ ret i32 %retval
+}
+
+; CHECK-LABEL: xray_sled_0:
+; CHECK-NEXT: .ascii "\353\t"
+; CHECK-NEXT: nopw 512(%rax,%rax)
+; CHECK-LABEL: Ltmp0:
+
diff --git a/test/CodeGen/X86/xray-tail-call-sled.ll b/test/CodeGen/X86/xray-tail-call-sled.ll
index ece786a5e809..b12c78a77b20 100644
--- a/test/CodeGen/X86/xray-tail-call-sled.ll
+++ b/test/CodeGen/X86/xray-tail-call-sled.ll
@@ -14,11 +14,17 @@ define i32 @callee() nounwind noinline uwtable "function-instrument"="xray-alway
; CHECK-NEXT: nopw %cs:512(%rax,%rax)
}
; CHECK: .p2align 4, 0x90
-; CHECK-NEXT: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_0{{.*}}
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_0{{.*}}
; CHECK-NEXT: .section {{.*}}xray_instr_map
; CHECK-LABEL: Lxray_synthetic_0:
; CHECK: .quad {{.*}}xray_sled_0
; CHECK: .quad {{.*}}xray_sled_1
+; CHECK-LABEL: Lxray_synthetic_end0:
+; CHECK-NEXT: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_0:
+; CHECK: .quad {{.*}}xray_synthetic_0
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_end0
define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-always" {
; CHECK: .p2align 1, 0x90
@@ -36,7 +42,13 @@ define i32 @caller() nounwind noinline uwtable "function-instrument"="xray-alway
ret i32 %retval
}
; CHECK: .p2align 4, 0x90
-; CHECK-NEXT: .quad {{.*}}xray_synthetic_1
+; CHECK-NEXT: .quad {{.*}}xray_synthetic_1{{.*}}
+; CHECK-NEXT: .quad {{.*}}xray_fn_idx_synth_1{{.*}}
; CHECK-LABEL: Lxray_synthetic_1:
; CHECK: .quad {{.*}}xray_sled_2
; CHECK: .quad {{.*}}xray_sled_3
+; CHECK-LABEL: Lxray_synthetic_end1:
+; CHECK: .section {{.*}}xray_fn_idx
+; CHECK-LABEL: Lxray_fn_idx_synth_1:
+; CHECK: .quad {{.*}}xray_synthetic_1
+; CHECK: .quad {{.*}}xray_synthetic_end1
diff --git a/test/DebugInfo/COFF/synthetic.ll b/test/DebugInfo/COFF/synthetic.ll
new file mode 100644
index 000000000000..7a2f3b87b9e6
--- /dev/null
+++ b/test/DebugInfo/COFF/synthetic.ll
@@ -0,0 +1,55 @@
+; RUN: llc -mtriple x86_64-unknown-windows-msvc -filetype asm -o - %s | FileCheck %s
+
+target datalayout = "e-m:w-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-windows-msvc"
+
+define dllexport void ()* @f() !dbg !6 {
+entry:
+ ret void ()* null, !dbg !28
+}
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "<stdin>", directory: "/Users/compnerd/Source/llvm", checksumkind: CSK_MD5, checksum: "2851eea4f12e754f1a68c47a7045406a")
+!2 = !{}
+!3 = !{i32 2, !"CodeView", i32 1}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!6 = distinct !DISubprogram(name: "f", scope: !1, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!7 = !DISubroutineType(types: !8)
+!8 = !{!9}
+!9 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !10, size: 64)
+!10 = !DICompositeType(tag: DW_TAG_structure_type, scope: !1, size: 256, flags: DIFlagAppleBlock, elements: !11)
+!11 = !{!12, !14, !16, !17, !21}
+!12 = !DIDerivedType(tag: DW_TAG_member, name: "__isa", scope: !1, file: !1, baseType: !13, size: 64)
+!13 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: null, size: 64)
+!14 = !DIDerivedType(tag: DW_TAG_member, name: "__flags", scope: !1, file: !1, baseType: !15, size: 32, offset: 64)
+!15 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!16 = !DIDerivedType(tag: DW_TAG_member, name: "__reserved", scope: !1, file: !1, baseType: !15, size: 32, offset: 96)
+!17 = !DIDerivedType(tag: DW_TAG_member, name: "__FuncPtr", scope: !1, file: !1, baseType: !18, size: 64, offset: 128)
+!18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !19, size: 64)
+!19 = !DISubroutineType(types: !20)
+!20 = !{null}
+!21 = !DIDerivedType(tag: DW_TAG_member, name: "__descriptor", scope: !1, baseType: !22, size: 64, align: 64, offset: 192)
+!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 64)
+!23 = !DICompositeType(tag: DW_TAG_structure_type, name: "__block_descriptor", scope: !1, size: 64, flags: DIFlagAppleBlock, elements: !24)
+!24 = !{!25, !27}
+!25 = !DIDerivedType(tag: DW_TAG_member, name: "reserved", scope: !1, file: !1, baseType: !26, size: 32)
+!26 = !DIBasicType(name: "long unsigned int", size: 32, encoding: DW_ATE_unsigned)
+!27 = !DIDerivedType(tag: DW_TAG_member, name: "Size", scope: !1, file: !1, baseType: !26, size: 32, offset: 32)
+!28 = !DILocation(line: 1, scope: !6)
+
+; CHECK: # Struct
+; CHECK: # TypeLeafKind: LF_STRUCTURE
+; CHECK: # MemberCount: 0
+; CHECK: # Properties [
+; CHECK: # ForwardReference
+; CHECK: # ]
+; CHECK: # FieldList: 0x0
+; CHECK: # DerivedFrom: 0x0
+; CHECK: # VShape: 0x0
+; CHECK: # SizeOf: 0
+; CHECK: # Name: __block_descriptor
+; CHECK: # }
+
diff --git a/test/DebugInfo/Inputs/dwarfdump-decompression-error.elf-x86-64 b/test/DebugInfo/Inputs/dwarfdump-decompression-error.elf-x86-64
new file mode 100644
index 000000000000..ba352f51123d
--- /dev/null
+++ b/test/DebugInfo/Inputs/dwarfdump-decompression-error.elf-x86-64
Binary files differ
diff --git a/test/DebugInfo/dwarfdump-decompression-error.test b/test/DebugInfo/dwarfdump-decompression-error.test
new file mode 100644
index 000000000000..184833164dc9
--- /dev/null
+++ b/test/DebugInfo/dwarfdump-decompression-error.test
@@ -0,0 +1,15 @@
+REQUIRES: zlib
+
+// dwarfdump-decompression-error.elf-x86-64 is prepared using following
+// source code and invocation:
+// test.cpp:
+// int main() { return 0; }
+//
+// gcc test.cpp -o out -g -Wl,--compress-debug-sections,zlib
+//
+// After that result object was modified manually. One random byte in compressed
+// content of .debug_info section was changed to 0xff. That breaks normal
+// decompression flow in runtime.
+RUN: llvm-dwarfdump %p/Inputs/dwarfdump-decompression-error.elf-x86-64 2>&1 | FileCheck %s
+
+CHECK: error: failed to decompress '.debug_info', zlib error: Z_DATA_ERROR
diff --git a/test/Linker/metadata-global.ll b/test/Linker/metadata-global.ll
new file mode 100644
index 000000000000..56d77e128bde
--- /dev/null
+++ b/test/Linker/metadata-global.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-link %s -S | FileCheck %s
+
+; CHECK-DAG: @a = global i32 0
+; CHECK-DAG: @b = global i32 0, !associated !0
+
+; CHECK-DAG: !0 = !{i32* @b}
+
+@a = global i32 0
+@b = global i32 0, !associated !0
+
+!0 = !{i32* @b}
diff --git a/test/MC/AArch64/basic-a64-instructions.s b/test/MC/AArch64/basic-a64-instructions.s
index 8a82c99eb8c1..46b2397ec734 100644
--- a/test/MC/AArch64/basic-a64-instructions.s
+++ b/test/MC/AArch64/basic-a64-instructions.s
@@ -1496,23 +1496,6 @@ _func:
// Data-processing (2 source)
//------------------------------------------------------------------------------
- crc32b w5, w7, w20
- crc32h w28, wzr, w30
- crc32w w0, w1, w2
- crc32x w7, w9, x20
- crc32cb w9, w5, w4
- crc32ch w13, w17, w25
- crc32cw wzr, w3, w5
- crc32cx w18, w16, xzr
-// CHECK: crc32b w5, w7, w20 // encoding: [0xe5,0x40,0xd4,0x1a]
-// CHECK: crc32h w28, wzr, w30 // encoding: [0xfc,0x47,0xde,0x1a]
-// CHECK: crc32w w0, w1, w2 // encoding: [0x20,0x48,0xc2,0x1a]
-// CHECK: crc32x w7, w9, x20 // encoding: [0x27,0x4d,0xd4,0x9a]
-// CHECK: crc32cb w9, w5, w4 // encoding: [0xa9,0x50,0xc4,0x1a]
-// CHECK: crc32ch w13, w17, w25 // encoding: [0x2d,0x56,0xd9,0x1a]
-// CHECK: crc32cw wzr, w3, w5 // encoding: [0x7f,0x58,0xc5,0x1a]
-// CHECK: crc32cx w18, w16, xzr // encoding: [0x12,0x5e,0xdf,0x9a]
-
udiv w0, w7, w10
udiv x9, x22, x4
sdiv w12, w21, w0
diff --git a/test/MC/AArch64/crc.s b/test/MC/AArch64/crc.s
new file mode 100644
index 000000000000..f0e4a5aa7531
--- /dev/null
+++ b/test/MC/AArch64/crc.s
@@ -0,0 +1,45 @@
+// RUN: llvm-mc -triple aarch64-- -mattr=+crc %s 2>&1 |\
+// RUN: FileCheck %s --check-prefix=CRC
+
+// RUN: not llvm-mc -triple aarch64-- %s 2>&1 |\
+// RUN: FileCheck %s --check-prefix=NOCRC
+// RUN: not llvm-mc -triple aarch64-- -mcpu=cyclone %s 2>&1 |\
+// RUN: FileCheck %s --check-prefix=NOCRC
+
+ crc32b w0, w1, w5
+ crc32h w3, w5, w6
+ crc32w w19, wzr, w20
+ crc32x w3, w5, x20
+
+// CRC: crc32b w0, w1, w5
+// CRC: crc32h w3, w5, w6
+// CRC: crc32w w19, wzr, w20
+// CRC: crc32x w3, w5, x20
+
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32b w0, w1, w5
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32h w3, w5, w6
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32w w19, wzr, w20
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32x w3, w5, x20
+
+ crc32cb w5, w10, w15
+ crc32ch w3, w5, w7
+ crc32cw w11, w13, w17
+ crc32cx w19, w23, x29
+
+// CRC: crc32cb w5, w10, w15
+// CRC: crc32ch w3, w5, w7
+// CRC: crc32cw w11, w13, w17
+// CRC: crc32cx w19, w23, x29
+
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32cb w5, w10, w15
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32ch w3, w5, w7
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32cw w11, w13, w17
+// NOCRC: error: instruction requires: crc
+// NOCRC: crc32cx w19, w23, x29
diff --git a/test/MC/AArch64/cyclone-crc.s b/test/MC/AArch64/cyclone-crc.s
deleted file mode 100644
index 5786df51ddeb..000000000000
--- a/test/MC/AArch64/cyclone-crc.s
+++ /dev/null
@@ -1,27 +0,0 @@
-// RUN: not llvm-mc -triple arm64-apple-ios -mcpu=cyclone %s 2>&1 | FileCheck %s
-
- crc32b w0, w1, w5
- crc32h w3, w5, w6
- crc32w w19, wzr, w20
- crc32x w3, w5, x20
-CHECK: error: instruction requires: crc
-CHECK: crc32b w0, w1, w5
-CHECK: error: instruction requires: crc
-CHECK: crc32h w3, w5, w6
-CHECK: error: instruction requires: crc
-CHECK: crc32w w19, wzr, w20
-CHECK: error: instruction requires: crc
-CHECK: crc32x w3, w5, x20
-
- crc32cb w5, w10, w15
- crc32ch w3, w5, w7
- crc32cw w11, w13, w17
- crc32cx w19, w23, x29
-CHECK: error: instruction requires: crc
-CHECK: crc32cb w5, w10, w15
-CHECK: error: instruction requires: crc
-CHECK: crc32ch w3, w5, w7
-CHECK: error: instruction requires: crc
-CHECK: crc32cw w11, w13, w17
-CHECK: error: instruction requires: crc
-CHECK: crc32cx w19, w23, x29
diff --git a/test/MC/AArch64/directive-arch-negative.s b/test/MC/AArch64/directive-arch-negative.s
index 21fd90ebdf11..2991d2499ebf 100644
--- a/test/MC/AArch64/directive-arch-negative.s
+++ b/test/MC/AArch64/directive-arch-negative.s
@@ -44,6 +44,12 @@
# CHECK: error: instruction requires: lse
# CHECK: casa w5, w7, [x19]
+ .arch armv8+crypto
+ crc32b w0, w1, w2
+
+# CHECK: error: instruction requires: crc
+# CHECK: crc32b w0, w1, w2
+
.arch armv8.1-a+nolse
casa w5, w7, [x20]
diff --git a/test/MC/ARM/ltorg-range.s b/test/MC/ARM/ltorg-range.s
new file mode 100644
index 000000000000..5c27d4cd0df2
--- /dev/null
+++ b/test/MC/ARM/ltorg-range.s
@@ -0,0 +1,27 @@
+@ RUN: llvm-mc -triple armv7-unknown-linux-gnueabi -filetype obj -o - %s \
+@ RUN: | llvm-objdump -d - | FileCheck %s
+
+ ldr r0, =0x01020304
+@ CHECK: ldr
+ .ltorg
+@ CHECK: 0x01020304
+ ldr r0, =0x01020304
+ ldr r0, =0x01020304
+ ldr r0, =0x01020304
+@ CHECK: ldr
+@ CHECK: ldr
+@ CHECK: ldr
+ .ltorg
+@ CHECK: 0x01020304
+ .rep 1028
+ .word 0
+ .endr
+@ CHECK: 0x00000000
+
+ ldr r0, =0x01020304
+@ CHECK: ldr
+ .ltorg
+@ CHECK: 0x01020304
+ .rep 1028
+ .word 0
+ .endr
diff --git a/test/MC/ARM/negative-immediates-fail.s b/test/MC/ARM/negative-immediates-fail.s
index dd45e4316389..959e55eebb5a 100644
--- a/test/MC/ARM/negative-immediates-fail.s
+++ b/test/MC/ARM/negative-immediates-fail.s
@@ -11,3 +11,8 @@ ADC r0, r1, #0xFFFFFE02
ADD.W r0, r0, #0xFF01FF01
# CHECK: error: immediate operand must be in the range [0,7]
+
+ORR r0, r1, #0xFFFFFF00
+# CHECK: error: instruction requires: thumb2
+ORN r0, r1, #0xFFFFFF00
+# CHECK: error: instruction requires: thumb2
diff --git a/test/MC/ARM/negative-immediates-thumb1-fail.s b/test/MC/ARM/negative-immediates-thumb1-fail.s
index 0e8525ede903..3648721203a0 100644
--- a/test/MC/ARM/negative-immediates-thumb1-fail.s
+++ b/test/MC/ARM/negative-immediates-thumb1-fail.s
@@ -13,3 +13,8 @@ SUBs r1, r0, #0xFFFFFFF5
SUBs r0, #0xFFFFFEFF
# CHECK: error: immediate operand must be in the range [0,255]
+
+ORRs r0, r1, #0xFFFFFF00
+# CHECK: error: instruction requires: thumb2
+ORNs r0, r1, #0xFFFFFF00
+# CHECK: error: instruction requires: thumb2
diff --git a/test/MC/ARM/negative-immediates.s b/test/MC/ARM/negative-immediates.s
index aa3998163d88..38a6bbb1b7b4 100644
--- a/test/MC/ARM/negative-immediates.s
+++ b/test/MC/ARM/negative-immediates.s
@@ -98,6 +98,22 @@
# CHECK: and r0, r1, #16777472 @ encoding: [0x01,0xf0,0x01,0x20]
# CHECK-DISABLED: error: instruction requires: NegativeImmediates
# CHECK-DISABLED: BIC
+ ORR r0, r1, #0xFFFFFF00
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ORR
+# CHECK: orn r0, r1, #255
+ ORR r0, r1, #0xFEFFFEFF
+# CHECK: orn r0, r1, #16777472 @ encoding: [0x61,0xf0,0x01,0x20]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ORR
+ ORN r0, r1, #0xFFFFFF00
+# CHECK: orr r0, r1, #255
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ORN
+ ORN r0, r1, #0xFEFFFEFF
+# CHECK: orr r0, r1, #16777472 @ encoding: [0x41,0xf0,0x01,0x20]
+# CHECK-DISABLED: error: instruction requires: NegativeImmediates
+# CHECK-DISABLED: ORN
CMP r0, #0xFFFFFF01
# CHECK: cmn.w r0, #255
# CHECK-DISABLED: error: instruction requires: NegativeImmediates
diff --git a/test/MC/AsmParser/altmacro_string.s b/test/MC/AsmParser/altmacro_string.s
new file mode 100644
index 000000000000..70012b2b8523
--- /dev/null
+++ b/test/MC/AsmParser/altmacro_string.s
@@ -0,0 +1,73 @@
+# RUN: llvm-mc -triple i386-linux-gnu %s| FileCheck %s
+
+# This test checks the altmacro string delimiter '<' and '>'.
+
+.altmacro
+
+# Test #1:
+# You can delimit strings with matching angle brackets '<' '>'.
+# If an argument begins with '<' and ends with '>'.
+# The argument is considered as a string.
+
+# CHECK: simpleCheck:
+.macro simple_check_0 name
+ \name:
+ addl $5,%eax
+.endm
+
+simple_check_0 <simpleCheck>
+
+# Test #2:
+# Except adding new string marks '<..>', a regular macro behavior is expected.
+
+# CHECK: simpleCheck0:
+# CHECK: addl $0, %eax
+.macro concat string1 string2 string3
+ \string1\string2\string3:
+ addl $\string3, %eax
+.endm
+
+concat <simple>,<Check>,<0>
+
+# Test #3:
+# The altmacro cannot affect the regular less/greater behavior.
+
+# CHECK: addl $1, %eax
+# CHECK: addl $0, %eax
+
+.macro fun3 arg1 arg2
+ addl $\arg1,%eax
+ addl $\arg2,%eax
+.endm
+
+fun3 5<6 , 5>8
+
+# Test #4:
+# If a comma is present inside an angle brackets,
+# the comma considered as a character and not as a separator.
+# This check checks the ability to split the string to different
+# arguments according to the use of the comma.
+# Fun2 sees the comma as a character.
+# Fun3 sees the comma as a separator.
+
+# CHECK: addl $5, %eax
+# CHECK: addl $6, %eax
+.macro fun2 arg
+ fun3 \arg
+.endm
+
+fun2 <5,6>
+
+# Test #5:
+# If argument begin with '<' and there is no '>' to close it.
+# A regular macro behavior is expected.
+
+# CHECK: addl $4, %eax
+.macro fun4 arg1 arg2
+ .if \arg2\arg1
+ addl $\arg2,%eax
+ .endif
+.endm
+
+fun4 <5,4
+.noaltmacro
diff --git a/test/MC/AsmParser/negative_altmacro_string.s b/test/MC/AsmParser/negative_altmacro_string.s
new file mode 100644
index 000000000000..81096c6cbdaa
--- /dev/null
+++ b/test/MC/AsmParser/negative_altmacro_string.s
@@ -0,0 +1,29 @@
+# RUN: not llvm-mc -triple i386-linux-gnu %s 2>&1 | FileCheck %s
+
+# This test checks the altmacro string delimiter '<' and '>'.
+# In this test we check the '.noaltmacro' directive.
+# We expect that '.altmacro' and '.noaltmacro' will act as a switch on/off directives to the alternate macro mode.
+# .noaltmacro returns the format into a regular macro handling.
+# The default mode is ".noaltmacro".
+
+# Test #1: default mode
+# CHECK: error: unexpected token at start of statement
+# CHECK-NEXT: <simpleCheck>:
+.macro simple_check_0 name
+ \name:
+.endm
+
+simple_check_0 <simpleCheck>
+
+
+.altmacro
+.noaltmacro
+
+# Test #2: Switching from alternate mode to default mode
+# CHECK: error: unexpected token at start of statement
+# CHECK-NEXT: <simpleCheck1>:
+.macro simple_check_1 name
+ \name:
+.endm
+
+simple_check_1 <simpleCheck1>
diff --git a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
index 4d438e032e77..a2f9d24091ef 100644
--- a/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
+++ b/test/MC/Disassembler/AArch64/basic-a64-instructions.txt
@@ -1042,23 +1042,6 @@
# Data-processing (2 source)
#------------------------------------------------------------------------------
-# CHECK: crc32b w5, w7, w20
-# CHECK: crc32h w28, wzr, w30
-# CHECK: crc32w w0, w1, w2
-# CHECK: crc32x w7, w9, x20
-# CHECK: crc32cb w9, w5, w4
-# CHECK: crc32ch w13, w17, w25
-# CHECK: crc32cw wzr, w3, w5
-# CHECK: crc32cx w18, w16, xzr
-0xe5 0x40 0xd4 0x1a
-0xfc 0x47 0xde 0x1a
-0x20 0x48 0xc2 0x1a
-0x27 0x4d 0xd4 0x9a
-0xa9 0x50 0xc4 0x1a
-0x2d 0x56 0xd9 0x1a
-0x7f 0x58 0xc5 0x1a
-0x12 0x5e 0xdf 0x9a
-
# CHECK: udiv w0, w7, w10
# CHECK: udiv x9, x22, x4
# CHECK: sdiv w12, w21, w0
diff --git a/test/ObjectYAML/wasm/name_section.yaml b/test/ObjectYAML/wasm/name_section.yaml
new file mode 100644
index 000000000000..0a4191dd0541
--- /dev/null
+++ b/test/ObjectYAML/wasm/name_section.yaml
@@ -0,0 +1,40 @@
+# RUN: yaml2obj %s | obj2yaml | FileCheck %s
+--- !WASM
+FileHeader:
+ Version: 0x00000001
+Sections:
+ - Type: TYPE
+ Signatures:
+ - ReturnType: I32
+ ParamTypes:
+ - I32
+ - Type: IMPORT
+ Imports:
+ - Module: foo
+ Field: a
+ Kind: FUNCTION
+ SigIndex: 0
+ - Module: foo
+ Field: b
+ Kind: FUNCTION
+ SigIndex: 0
+ - Module: foo
+ Field: c
+ Kind: FUNCTION
+ SigIndex: 0
+ - Type: CUSTOM
+ Name: name
+ FunctionNames:
+ - Index: 1
+ Name: foo
+ - Index: 0
+ Name: bar
+...
+# CHECK: - Type: CUSTOM
+# CHECK-NEXT: Name: name
+# CHECK-NEXT: FunctionNames:
+# CHECK-NEXT: - Index: 1
+# CHECK-NEXT: Name: foo
+# CHECK-NEXT: - Index: 0
+# CHECK-NEXT: Name: bar
+# CHECK: ...
diff --git a/test/Other/new-pm-defaults.ll b/test/Other/new-pm-defaults.ll
index a4a1c1f546c6..f712dc7b63ca 100644
--- a/test/Other/new-pm-defaults.ll
+++ b/test/Other/new-pm-defaults.ll
@@ -57,6 +57,8 @@
; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}GlobalsAA
; CHECK-O-NEXT: Running analysis: GlobalsAA
; CHECK-O-NEXT: Running analysis: CallGraphAnalysis
+; CHECK-O-NEXT: Running pass: RequireAnalysisPass<{{.*}}ProfileSummaryAnalysis
+; CHECK-O-NEXT: Running analysis: ProfileSummaryAnalysis
; CHECK-O-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}LazyCallGraph{{.*}}>
; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy
; CHECK-O-NEXT: Running analysis: LazyCallGraphAnalysis
diff --git a/test/Transforms/ArgumentPromotion/pr32917.ll b/test/Transforms/ArgumentPromotion/pr32917.ll
new file mode 100644
index 000000000000..a2aeac081cea
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/pr32917.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; PR 32917
+
+@b = common local_unnamed_addr global i32 0, align 4
+@a = common local_unnamed_addr global i32 0, align 4
+
+define i32 @fn2() local_unnamed_addr {
+ %1 = load i32, i32* @b, align 4
+ %2 = sext i32 %1 to i64
+ %3 = inttoptr i64 %2 to i32*
+ call fastcc void @fn1(i32* %3)
+ ret i32 undef
+}
+
+define internal fastcc void @fn1(i32* nocapture readonly) unnamed_addr {
+ %2 = getelementptr inbounds i32, i32* %0, i64 -1
+ %3 = load i32, i32* %2, align 4
+ store i32 %3, i32* @a, align 4
+ ret void
+}
+
+; CHECK: getelementptr {{.*}} -1
+; CHECK-NOT: getelementptr {{.*}} 4294967295
diff --git a/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll b/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll
index b2442b8b173c..c0d89d606d66 100644
--- a/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll
+++ b/test/Transforms/CodeExtractor/PartialInlineOptRemark.ll
@@ -32,52 +32,52 @@ bb2: ; preds = %bb1, %bb
ret i32 %tmp3, !dbg !19
}
-define i32 @bar_noinline(i32 %arg) local_unnamed_addr #1 !dbg !5 {
+define i32 @bar_noinline(i32 %arg) local_unnamed_addr #1 !dbg !23 {
bb:
- %tmp = icmp slt i32 %arg, 0, !dbg !7
- br i1 %tmp, label %bb1, label %bb2, !dbg !8
+ %tmp = icmp slt i32 %arg, 0, !dbg !24
+ br i1 %tmp, label %bb1, label %bb2, !dbg !24
bb1: ; preds = %bb
- tail call void (...) @foo() #0, !dbg !9
- tail call void (...) @foo() #0, !dbg !10
- tail call void (...) @foo() #0, !dbg !11
- br label %bb2, !dbg !18
+ tail call void (...) @foo() #0, !dbg !24
+ tail call void (...) @foo() #0, !dbg !24
+ tail call void (...) @foo() #0, !dbg !24
+ br label %bb2, !dbg !24
bb2: ; preds = %bb1, %bb
%tmp3 = phi i32 [ 0, %bb1 ], [ 1, %bb ]
- ret i32 %tmp3, !dbg !19
+ ret i32 %tmp3, !dbg !24
}
-define i32 @bar_alwaysinline(i32 %arg) local_unnamed_addr #2 !dbg !5 {
+define i32 @bar_alwaysinline(i32 %arg) local_unnamed_addr #2 !dbg !25 {
bb:
- %tmp = icmp slt i32 %arg, 0, !dbg !7
- br i1 %tmp, label %bb1, label %bb2, !dbg !8
+ %tmp = icmp slt i32 %arg, 0, !dbg !26
+ br i1 %tmp, label %bb1, label %bb2, !dbg !26
bb1: ; preds = %bb
- tail call void (...) @foo() #0, !dbg !9
- tail call void (...) @foo() #0, !dbg !10
- tail call void (...) @foo() #0, !dbg !11
- br label %bb2, !dbg !18
+ tail call void (...) @foo() #0, !dbg !26
+ tail call void (...) @foo() #0, !dbg !26
+ tail call void (...) @foo() #0, !dbg !26
+ br label %bb2, !dbg !26
bb2: ; preds = %bb1, %bb
%tmp3 = phi i32 [ 0, %bb1 ], [ 1, %bb ]
- ret i32 %tmp3, !dbg !19
+ ret i32 %tmp3, !dbg !26
}
-define i32 @bar_cold(i32 %arg) local_unnamed_addr #3 !dbg !5 {
+define i32 @bar_cold(i32 %arg) local_unnamed_addr #3 !dbg !27 {
bb:
- %tmp = icmp slt i32 %arg, 0, !dbg !7
- br i1 %tmp, label %bb1, label %bb2, !dbg !8
+ %tmp = icmp slt i32 %arg, 0, !dbg !28
+ br i1 %tmp, label %bb1, label %bb2, !dbg !28
bb1: ; preds = %bb
- tail call void (...) @foo() #0, !dbg !9
- tail call void (...) @foo() #0, !dbg !10
- tail call void (...) @foo() #0, !dbg !11
- br label %bb2, !dbg !18
+ tail call void (...) @foo() #0, !dbg !28
+ tail call void (...) @foo() #0, !dbg !28
+ tail call void (...) @foo() #0, !dbg !28
+ br label %bb2, !dbg !28
bb2: ; preds = %bb1, %bb
%tmp3 = phi i32 [ 0, %bb1 ], [ 1, %bb ]
- ret i32 %tmp3, !dbg !19
+ ret i32 %tmp3, !dbg !28
}
; Function Attrs: nounwind
@@ -130,3 +130,9 @@ attributes #3 = { cold nounwind }
!20 = distinct !DISubprogram(name: "dummy_caller", scope: !1, file: !1, line: 19, type: !6, isLocal: false, isDefinition: true, scopeLine: 19, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
!21 = !DILocation(line: 21, column: 11, scope: !20)
!22 = !DILocation(line: 21, column: 4, scope: !20)
+!23 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !6, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
+!24 = !DILocation(line: 4, column: 6, scope: !23)
+!25 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !6, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
+!26 = !DILocation(line: 4, column: 6, scope: !25)
+!27 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !6, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !2)
+!28 = !DILocation(line: 4, column: 6, scope: !27)
diff --git a/test/Transforms/Inline/inline-hot-callsite.ll b/test/Transforms/Inline/inline-hot-callsite.ll
index ebf4030d3d10..48fa3039741f 100644
--- a/test/Transforms/Inline/inline-hot-callsite.ll
+++ b/test/Transforms/Inline/inline-hot-callsite.ll
@@ -1,16 +1,21 @@
-; RUN: opt < %s -inline -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s
-; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s
-
; This tests that a hot callsite gets the (higher) inlinehint-threshold even without
; without inline hints and gets inlined because the cost is less than
; inlinehint-threshold. A cold callee with identical body does not get inlined because
; cost exceeds the inline-threshold
+; RUN: opt < %s -inline -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s
+; RUN: opt < %s -passes='require<profile-summary>,cgscc(inline)' -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s
+
+; Run this with the default O2 pipeline to test that profile summary analysis
+; is available during inlining.
+; RUN: opt < %s -passes='default<O2>' -inline-threshold=0 -hot-callsite-threshold=100 -S | FileCheck %s
+
define i32 @callee1(i32 %x) {
%x1 = add i32 %x, 1
%x2 = add i32 %x1, 1
%x3 = add i32 %x2, 1
call void @extern()
+ call void @extern()
ret i32 %x3
}
@@ -20,6 +25,7 @@ define i32 @callee2(i32 %x) {
%x2 = add i32 %x1, 1
%x3 = add i32 %x2, 1
call void @extern()
+ call void @extern()
ret i32 %x3
}
diff --git a/test/Transforms/Inline/prof-update.ll b/test/Transforms/Inline/prof-update.ll
index 38fcc7e45996..3fefa1c56cea 100644
--- a/test/Transforms/Inline/prof-update.ll
+++ b/test/Transforms/Inline/prof-update.ll
@@ -3,6 +3,7 @@
declare void @ext();
declare void @ext1();
+@func = global void ()* null
; CHECK: define void @callee(i32 %n) !prof ![[ENTRY_COUNT:[0-9]*]]
define void @callee(i32 %n) !prof !1 {
@@ -17,12 +18,16 @@ cond_false:
; ext is cloned and updated.
; CHECK: call void @ext(), !prof ![[COUNT_CALLEE:[0-9]*]]
call void @ext(), !prof !2
+ %f = load void ()*, void ()** @func
+; CHECK: call void %f(), !prof ![[COUNT_IND_CALLEE:[0-9]*]]
+ call void %f(), !prof !4
ret void
}
; CHECK: define void @caller()
define void @caller() {
; CHECK: call void @ext(), !prof ![[COUNT_CALLER:[0-9]*]]
+; CHECK: call void %f.i(), !prof ![[COUNT_IND_CALLER:[0-9]*]]
call void @callee(i32 15), !prof !3
ret void
}
@@ -32,8 +37,11 @@ define void @caller() {
!1 = !{!"function_entry_count", i64 1000}
!2 = !{!"branch_weights", i64 2000}
!3 = !{!"branch_weights", i64 400}
+!4 = !{!"VP", i32 0, i64 140, i64 111, i64 80, i64 222, i64 40, i64 333, i64 20}
attributes #0 = { alwaysinline }
; CHECK: ![[ENTRY_COUNT]] = !{!"function_entry_count", i64 600}
; CHECK: ![[COUNT_CALLEE1]] = !{!"branch_weights", i64 2000}
-; CHECK: ![[COUNT_CALLEE]] = !{!"branch_weights", i32 1200}
-; CHECK: ![[COUNT_CALLER]] = !{!"branch_weights", i32 800}
+; CHECK: ![[COUNT_CALLEE]] = !{!"branch_weights", i64 1200}
+; CHECK: ![[COUNT_IND_CALLEE]] = !{!"VP", i32 0, i64 84, i64 111, i64 48, i64 222, i64 24, i64 333, i64 12}
+; CHECK: ![[COUNT_CALLER]] = !{!"branch_weights", i64 800}
+; CHECK: ![[COUNT_IND_CALLER]] = !{!"VP", i32 0, i64 56, i64 111, i64 32, i64 222, i64 16, i64 333, i64 8}
diff --git a/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll b/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
deleted file mode 100644
index 9c989b9ecf8a..000000000000
--- a/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: opt < %s -instcombine -S | \
-; RUN: grep "ret i1 true"
-; PR586
-
-@g_07918478 = external global i32 ; <i32*> [#uses=1]
-
-define i1 @test() {
- %tmp.0 = load i32, i32* @g_07918478 ; <i32> [#uses=2]
- %tmp.1 = icmp ne i32 %tmp.0, 0 ; <i1> [#uses=1]
- %tmp.4 = icmp ult i32 %tmp.0, 4111 ; <i1> [#uses=1]
- %bothcond = or i1 %tmp.1, %tmp.4 ; <i1> [#uses=1]
- ret i1 %bothcond
-}
-
diff --git a/test/Transforms/InstCombine/AddOverFlow.ll b/test/Transforms/InstCombine/AddOverFlow.ll
index a341cb042ccf..91fa86e81579 100644
--- a/test/Transforms/InstCombine/AddOverFlow.ll
+++ b/test/Transforms/InstCombine/AddOverFlow.ll
@@ -95,6 +95,44 @@ define i16 @ripple_nsw2(i16 %x, i16 %y) {
ret i16 %c
}
+; CHECK-LABEL: @ripple_nsw3
+; CHECK: add nsw i16 %a, %b
+define i16 @ripple_nsw3(i16 %x, i16 %y) {
+ %a = and i16 %y, 43691
+ %b = and i16 %x, 21843
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+; Like the previous test, but flip %a and %b
+; CHECK-LABEL: @ripple_nsw4
+; CHECK: add nsw i16 %b, %a
+define i16 @ripple_nsw4(i16 %x, i16 %y) {
+ %a = and i16 %y, 43691
+ %b = and i16 %x, 21843
+ %c = add i16 %b, %a
+ ret i16 %c
+}
+
+; CHECK-LABEL: @ripple_nsw5
+; CHECK: add nsw i16 %a, %b
+define i16 @ripple_nsw5(i16 %x, i16 %y) {
+ %a = or i16 %y, 43691
+ %b = or i16 %x, 54613
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+; Like the previous test, but flip %a and %b
+; CHECK-LABEL: @ripple_nsw6
+; CHECK: add nsw i16 %b, %a
+define i16 @ripple_nsw6(i16 %x, i16 %y) {
+ %a = or i16 %y, 43691
+ %b = or i16 %x, 54613
+ %c = add i16 %b, %a
+ ret i16 %c
+}
+
; CHECK-LABEL: @ripple_no_nsw1
; CHECK: add i32 %a, %x
define i32 @ripple_no_nsw1(i32 %x, i32 %y) {
@@ -116,3 +154,41 @@ define i16 @ripple_no_nsw2(i16 %x, i16 %y) {
%c = add i16 %a, %b
ret i16 %c
}
+
+; CHECK-LABEL: @ripple_no_nsw3
+; CHECK: add i16 %a, %b
+define i16 @ripple_no_nsw3(i16 %x, i16 %y) {
+ %a = and i16 %y, 43691
+ %b = and i16 %x, 21845
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+; Like the previous test, but flip %a and %b
+; CHECK-LABEL: @ripple_no_nsw4
+; CHECK: add i16 %b, %a
+define i16 @ripple_no_nsw4(i16 %x, i16 %y) {
+ %a = and i16 %y, 43691
+ %b = and i16 %x, 21845
+ %c = add i16 %b, %a
+ ret i16 %c
+}
+
+; CHECK-LABEL: @ripple_no_nsw5
+; CHECK: add i16 %a, %b
+define i16 @ripple_no_nsw5(i16 %x, i16 %y) {
+ %a = or i16 %y, 43689
+ %b = or i16 %x, 54613
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+; Like the previous test, but flip %a and %b
+; CHECK-LABEL: @ripple_no_nsw6
+; CHECK: add i16 %b, %a
+define i16 @ripple_no_nsw6(i16 %x, i16 %y) {
+ %a = or i16 %y, 43689
+ %b = or i16 %x, 54613
+ %c = add i16 %b, %a
+ ret i16 %c
+}
diff --git a/test/Transforms/InstCombine/and-or-icmps.ll b/test/Transforms/InstCombine/and-or-icmps.ll
index 464f390f988f..165f5d1bffed 100644
--- a/test/Transforms/InstCombine/and-or-icmps.ll
+++ b/test/Transforms/InstCombine/and-or-icmps.ll
@@ -15,9 +15,7 @@ define i1 @PR1817_1(i32 %X) {
define i1 @PR1817_2(i32 %X) {
; CHECK-LABEL: @PR1817_2(
; CHECK-NEXT: [[A:%.*]] = icmp slt i32 %X, 10
-; CHECK-NEXT: [[B:%.*]] = icmp ult i32 %X, 10
-; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 [[A]]
;
%A = icmp slt i32 %X, 10
%B = icmp ult i32 %X, 10
diff --git a/test/Transforms/InstCombine/debuginfo-dce.ll b/test/Transforms/InstCombine/debuginfo-dce.ll
index 58e9d7d767e9..086743e80820 100644
--- a/test/Transforms/InstCombine/debuginfo-dce.ll
+++ b/test/Transforms/InstCombine/debuginfo-dce.ll
@@ -37,60 +37,60 @@ entry:
ret void, !dbg !21
}
-define void @salvage_bitcast(%struct.entry* %queue) local_unnamed_addr #0 !dbg !14 {
+define void @salvage_bitcast(%struct.entry* %queue) local_unnamed_addr #0 !dbg !22 {
entry:
%im_not_dead = alloca i8*
- %0 = bitcast %struct.entry* %queue to i8*, !dbg !19
- %1 = bitcast %struct.entry* %queue to i8*, !dbg !19
- call void @llvm.dbg.value(metadata i8* %1, i64 0, metadata !18, metadata !20), !dbg !19
+ %0 = bitcast %struct.entry* %queue to i8*, !dbg !23
+ %1 = bitcast %struct.entry* %queue to i8*, !dbg !23
+ call void @llvm.dbg.value(metadata i8* %1, i64 0, metadata !24, metadata !20), !dbg !23
; CHECK: define void @salvage_bitcast
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0,
; CHECK-SAME: metadata ![[BITCAST_EXPR:[0-9]+]])
store i8* %1, i8** %im_not_dead, align 8
- ret void, !dbg !21
+ ret void, !dbg !23
}
-define void @salvage_gep0(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !14 {
+define void @salvage_gep0(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !25 {
entry:
%im_not_dead = alloca %struct.entry**
- %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
- %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
- call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !18, metadata !20), !dbg !19
+ %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !26
+ %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !26
+ call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !27, metadata !20), !dbg !26
; CHECK: define void @salvage_gep0
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0,
; CHECK-SAME: metadata ![[GEP0_EXPR:[0-9]+]])
store %struct.entry** %1, %struct.entry*** %im_not_dead, align 8
- ret void, !dbg !21
+ ret void, !dbg !26
}
-define void @salvage_gep1(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !14 {
+define void @salvage_gep1(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !28 {
entry:
%im_not_dead = alloca %struct.entry**
- %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
- %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
- call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !18, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32)), !dbg !19
+ %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !29
+ %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !29
+ call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !30, metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32)), !dbg !29
; CHECK: define void @salvage_gep1
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0,
; CHECK-SAME: metadata ![[GEP1_EXPR:[0-9]+]])
store %struct.entry** %1, %struct.entry*** %im_not_dead, align 8
- ret void, !dbg !21
+ ret void, !dbg !29
}
-define void @salvage_gep2(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !14 {
+define void @salvage_gep2(%struct.entry* %queue, %struct.entry* %end) local_unnamed_addr #0 !dbg !31 {
entry:
%im_not_dead = alloca %struct.entry**
- %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
- %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !19
- call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !18, metadata !DIExpression(DW_OP_stack_value)), !dbg !19
+ %0 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !32
+ %1 = getelementptr inbounds %struct.entry, %struct.entry* %queue, i32 -1, i32 0, !dbg !32
+ call void @llvm.dbg.value(metadata %struct.entry** %1, i64 0, metadata !33, metadata !DIExpression(DW_OP_stack_value)), !dbg !32
; CHECK: define void @salvage_gep2
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.dbg.value(metadata %struct.entry* %queue, i64 0,
; CHECK-SAME: metadata ![[GEP2_EXPR:[0-9]+]])
store %struct.entry** %1, %struct.entry*** %im_not_dead, align 8
- ret void, !dbg !21
+ ret void, !dbg !32
}
; CHECK: ![[LOAD_EXPR]] = !DIExpression(DW_OP_deref, DW_OP_plus, 0)
@@ -132,3 +132,15 @@ attributes #1 = { nounwind readnone }
!19 = !DILocation(line: 6, column: 17, scope: !14)
!20 = !DIExpression(DW_OP_plus, 0)
!21 = !DILocation(line: 11, column: 1, scope: !14)
+!22 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17)
+!23 = !DILocation(line: 6, column: 17, scope: !22)
+!24 = !DILocalVariable(name: "entry", scope: !22, file: !1, line: 6, type: !4)
+!25 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17)
+!26 = !DILocation(line: 6, column: 17, scope: !25)
+!27 = !DILocalVariable(name: "entry", scope: !25, file: !1, line: 6, type: !4)
+!28 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17)
+!29 = !DILocation(line: 6, column: 17, scope: !28)
+!30 = !DILocalVariable(name: "entry", scope: !28, file: !1, line: 6, type: !4)
+!31 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !17)
+!32 = !DILocation(line: 6, column: 17, scope: !31)
+!33 = !DILocalVariable(name: "entry", scope: !31, file: !1, line: 6, type: !4)
diff --git a/test/Transforms/InstCombine/demand_shrink_nsw.ll b/test/Transforms/InstCombine/demand_shrink_nsw.ll
index f49174295167..4f7d00e32aaf 100644
--- a/test/Transforms/InstCombine/demand_shrink_nsw.ll
+++ b/test/Transforms/InstCombine/demand_shrink_nsw.ll
@@ -3,7 +3,7 @@
; The constant at %v35 should be shrunk, but this must lead to the nsw flag of
; %v43 getting removed so that %v44 is not illegally optimized away.
; CHECK-LABEL: @foo
-; CHECK: %v35 = add nuw i32 %v34, 1362915575
+; CHECK: %v35 = add nuw nsw i32 %v34, 1362915575
; ...
; CHECK: add nuw i32 %v42, 1533579450
; CHECK-NEXT: %v44 = or i32 %v43, -2147483648
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
index 9ae5eafdfccf..bfafd66ebb41 100644
--- a/test/Transforms/InstCombine/or.ll
+++ b/test/Transforms/InstCombine/or.ll
@@ -661,17 +661,6 @@ define i1 @test47(i8 signext %c) {
ret i1 %or
}
-define i1 @test48(i64 %x, i1 %b) {
-; CHECK-LABEL: @test48(
-; CHECK-NEXT: ret i1 true
-;
- %1 = icmp ult i64 %x, 2305843009213693952
- %2 = icmp ugt i64 %x, 2305843009213693951
- %.b = or i1 %2, %b
- %3 = or i1 %1, %.b
- ret i1 %3
-}
-
define i32 @test49(i1 %C) {
; CHECK-LABEL: @test49(
; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 1019, i32 123
diff --git a/test/Transforms/InstCombine/strlen-1.ll b/test/Transforms/InstCombine/strlen-1.ll
index f3287c0de35f..1e0dfb6a3088 100644
--- a/test/Transforms/InstCombine/strlen-1.ll
+++ b/test/Transforms/InstCombine/strlen-1.ll
@@ -64,13 +64,14 @@ define i1 @test_simplify5() {
ret i1 %eq_hello
}
-define i1 @test_simplify6() {
+define i1 @test_simplify6(i8* %str_p) {
; CHECK-LABEL: @test_simplify6(
-; CHECK-NEXT: ret i1 true
+; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i8, i8* [[STR_P:%.*]], align 1
+; CHECK-NEXT: [[EQ_NULL:%.*]] = icmp eq i8 [[STRLENFIRST]], 0
+; CHECK-NEXT: ret i1 [[EQ_NULL]]
;
- %null_p = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %null_l = call i32 @strlen(i8* %null_p)
- %eq_null = icmp eq i32 %null_l, 0
+ %str_l = call i32 @strlen(i8* %str_p)
+ %eq_null = icmp eq i32 %str_l, 0
ret i1 %eq_null
}
@@ -86,13 +87,14 @@ define i1 @test_simplify7() {
ret i1 %ne_hello
}
-define i1 @test_simplify8() {
+define i1 @test_simplify8(i8* %str_p) {
; CHECK-LABEL: @test_simplify8(
-; CHECK-NEXT: ret i1 false
+; CHECK-NEXT: [[STRLENFIRST:%.*]] = load i8, i8* [[STR_P:%.*]], align 1
+; CHECK-NEXT: [[NE_NULL:%.*]] = icmp ne i8 [[STRLENFIRST]], 0
+; CHECK-NEXT: ret i1 [[NE_NULL]]
;
- %null_p = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
- %null_l = call i32 @strlen(i8* %null_p)
- %ne_null = icmp ne i32 %null_l, 0
+ %str_l = call i32 @strlen(i8* %str_p)
+ %ne_null = icmp ne i32 %str_l, 0
ret i1 %ne_null
}
diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll
index f9aaa4fa0c6c..e059d77f1fa8 100644
--- a/test/Transforms/InstSimplify/AndOrXor.ll
+++ b/test/Transforms/InstSimplify/AndOrXor.ll
@@ -468,6 +468,51 @@ define <2 x i3> @and_of_different_cast_icmps_vec(<2 x i8> %i, <2 x i16> %j) {
ret <2 x i3> %and
}
+define i32 @or_of_zexted_icmps(i32 %i) {
+; CHECK-LABEL: @or_of_zexted_icmps(
+; CHECK-NEXT: ret i32 1
+;
+ %cmp0 = icmp ne i32 %i, 0
+ %conv0 = zext i1 %cmp0 to i32
+ %cmp1 = icmp uge i32 4, %i
+ %conv1 = zext i1 %cmp1 to i32
+ %or = or i32 %conv0, %conv1
+ ret i32 %or
+}
+
+; Try a different cast and weird vector types.
+
+define i3 @or_of_bitcast_icmps_vec(<3 x i65> %i) {
+; CHECK-LABEL: @or_of_bitcast_icmps_vec(
+; CHECK-NEXT: ret i3 bitcast (<3 x i1> <i1 true, i1 true, i1 true> to i3)
+;
+ %cmp0 = icmp sge <3 x i65> %i, zeroinitializer
+ %conv0 = bitcast <3 x i1> %cmp0 to i3
+ %cmp1 = icmp slt <3 x i65> %i, zeroinitializer
+ %conv1 = bitcast <3 x i1> %cmp1 to i3
+ %or = or i3 %conv0, %conv1
+ ret i3 %or
+}
+
+; We can't simplify if the casts are different.
+
+define i16 @or_of_different_cast_icmps(i8 %i) {
+; CHECK-LABEL: @or_of_different_cast_icmps(
+; CHECK-NEXT: [[CMP0:%.*]] = icmp ne i8 %i, 0
+; CHECK-NEXT: [[CONV0:%.*]] = zext i1 [[CMP0]] to i16
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i8 %i, 1
+; CHECK-NEXT: [[CONV1:%.*]] = sext i1 [[CMP1]] to i16
+; CHECK-NEXT: [[OR:%.*]] = or i16 [[CONV0]], [[CONV1]]
+; CHECK-NEXT: ret i16 [[OR]]
+;
+ %cmp0 = icmp ne i8 %i, 0
+ %conv0 = zext i1 %cmp0 to i16
+ %cmp1 = icmp ne i8 %i, 1
+ %conv1 = sext i1 %cmp1 to i16
+ %or = or i16 %conv0, %conv1
+ ret i16 %or
+}
+
; (A & ~B) | (A ^ B) -> A ^ B
define i32 @test43(i32 %a, i32 %b) {
diff --git a/test/Transforms/InstSimplify/compare.ll b/test/Transforms/InstSimplify/compare.ll
index cd2fa880294a..883bf31ff77a 100644
--- a/test/Transforms/InstSimplify/compare.ll
+++ b/test/Transforms/InstSimplify/compare.ll
@@ -576,13 +576,38 @@ define i1 @srem3(i16 %X, i32 %Y) {
ret i1 %E
}
-define i1 @udiv2(i32 %X, i32 %Y, i32 %Z) {
+define i1 @udiv2(i32 %Z) {
; CHECK-LABEL: @udiv2(
+; CHECK-NEXT: ret i1 true
+;
%A = udiv exact i32 10, %Z
%B = udiv exact i32 20, %Z
%C = icmp ult i32 %A, %B
ret i1 %C
-; CHECK: ret i1 true
+}
+
+; Exact sdiv and equality preds can simplify.
+
+define i1 @sdiv_exact_equality(i32 %Z) {
+; CHECK-LABEL: @sdiv_exact_equality(
+; CHECK-NEXT: ret i1 false
+;
+ %A = sdiv exact i32 10, %Z
+ %B = sdiv exact i32 20, %Z
+ %C = icmp eq i32 %A, %B
+ ret i1 %C
+}
+
+; FIXME: But not other preds: PR32949 - https://bugs.llvm.org/show_bug.cgi?id=32949
+
+define i1 @sdiv_exact_not_equality(i32 %Z) {
+; CHECK-LABEL: @sdiv_exact_not_equality(
+; CHECK-NEXT: ret i1 true
+;
+ %A = sdiv exact i32 10, %Z
+ %B = sdiv exact i32 20, %Z
+ %C = icmp ult i32 %A, %B
+ ret i1 %C
}
define i1 @udiv3(i32 %X, i32 %Y) {
diff --git a/test/Transforms/InstSimplify/icmp-ranges.ll b/test/Transforms/InstSimplify/icmp-ranges.ll
index 292be6a8a559..45194f2df4f1 100644
--- a/test/Transforms/InstSimplify/icmp-ranges.ll
+++ b/test/Transforms/InstSimplify/icmp-ranges.ll
@@ -2729,6 +2729,2732 @@ define i1 @and_ult_ult_swap(i8 %x) {
ret i1 %c
}
+; eq
+; x == 13 || x == 17
+
+define i1 @or_eq_eq(i8 %x) {
+; CHECK-LABEL: @or_eq_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x != 17
+
+define i1 @or_eq_ne(i8 %x) {
+; CHECK-LABEL: @or_eq_ne(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x >=s 17
+
+define i1 @or_eq_sge(i8 %x) {
+; CHECK-LABEL: @or_eq_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x >s 17
+
+define i1 @or_eq_sgt(i8 %x) {
+; CHECK-LABEL: @or_eq_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x <=s 17
+
+define i1 @or_eq_sle(i8 %x) {
+; CHECK-LABEL: @or_eq_sle(
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x <s 17
+
+define i1 @or_eq_slt(i8 %x) {
+; CHECK-LABEL: @or_eq_slt(
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x >=u 17
+
+define i1 @or_eq_uge(i8 %x) {
+; CHECK-LABEL: @or_eq_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x >u 17
+
+define i1 @or_eq_ugt(i8 %x) {
+; CHECK-LABEL: @or_eq_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x <=u 17
+
+define i1 @or_eq_ule(i8 %x) {
+; CHECK-LABEL: @or_eq_ule(
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 13 || x <u 17
+
+define i1 @or_eq_ult(i8 %x) {
+; CHECK-LABEL: @or_eq_ult(
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ne
+; x != 13 || x == 17
+
+define i1 @or_ne_eq(i8 %x) {
+; CHECK-LABEL: @or_ne_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x != 17
+
+define i1 @or_ne_ne(i8 %x) {
+; CHECK-LABEL: @or_ne_ne(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x >=s 17
+
+define i1 @or_ne_sge(i8 %x) {
+; CHECK-LABEL: @or_ne_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x >s 17
+
+define i1 @or_ne_sgt(i8 %x) {
+; CHECK-LABEL: @or_ne_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x <=s 17
+
+define i1 @or_ne_sle(i8 %x) {
+; CHECK-LABEL: @or_ne_sle(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x <s 17
+
+define i1 @or_ne_slt(i8 %x) {
+; CHECK-LABEL: @or_ne_slt(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x >=u 17
+
+define i1 @or_ne_uge(i8 %x) {
+; CHECK-LABEL: @or_ne_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x >u 17
+
+define i1 @or_ne_ugt(i8 %x) {
+; CHECK-LABEL: @or_ne_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x <=u 17
+
+define i1 @or_ne_ule(i8 %x) {
+; CHECK-LABEL: @or_ne_ule(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 13 || x <u 17
+
+define i1 @or_ne_ult(i8 %x) {
+; CHECK-LABEL: @or_ne_ult(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; sge
+; x >=s 13 || x == 17
+
+define i1 @or_sge_eq(i8 %x) {
+; CHECK-LABEL: @or_sge_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x != 17
+
+define i1 @or_sge_ne(i8 %x) {
+; CHECK-LABEL: @or_sge_ne(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x >=s 17
+
+define i1 @or_sge_sge(i8 %x) {
+; CHECK-LABEL: @or_sge_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x >s 17
+
+define i1 @or_sge_sgt(i8 %x) {
+; CHECK-LABEL: @or_sge_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x <=s 17
+
+define i1 @or_sge_sle(i8 %x) {
+; CHECK-LABEL: @or_sge_sle(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x <s 17
+
+define i1 @or_sge_slt(i8 %x) {
+; CHECK-LABEL: @or_sge_slt(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x >=u 17
+
+define i1 @or_sge_uge(i8 %x) {
+; CHECK-LABEL: @or_sge_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x >u 17
+
+define i1 @or_sge_ugt(i8 %x) {
+; CHECK-LABEL: @or_sge_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x <=u 17
+
+define i1 @or_sge_ule(i8 %x) {
+; CHECK-LABEL: @or_sge_ule(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 13 || x <u 17
+
+define i1 @or_sge_ult(i8 %x) {
+; CHECK-LABEL: @or_sge_ult(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; sgt
+; x >s 13 || x == 17
+
+define i1 @or_sgt_eq(i8 %x) {
+; CHECK-LABEL: @or_sgt_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x != 17
+
+define i1 @or_sgt_ne(i8 %x) {
+; CHECK-LABEL: @or_sgt_ne(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x >=s 17
+
+define i1 @or_sgt_sge(i8 %x) {
+; CHECK-LABEL: @or_sgt_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x >s 17
+
+define i1 @or_sgt_sgt(i8 %x) {
+; CHECK-LABEL: @or_sgt_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x <=s 17
+
+define i1 @or_sgt_sle(i8 %x) {
+; CHECK-LABEL: @or_sgt_sle(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x <s 17
+
+define i1 @or_sgt_slt(i8 %x) {
+; CHECK-LABEL: @or_sgt_slt(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x >=u 17
+
+define i1 @or_sgt_uge(i8 %x) {
+; CHECK-LABEL: @or_sgt_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x >u 17
+
+define i1 @or_sgt_ugt(i8 %x) {
+; CHECK-LABEL: @or_sgt_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x <=u 17
+
+define i1 @or_sgt_ule(i8 %x) {
+; CHECK-LABEL: @or_sgt_ule(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 13 || x <u 17
+
+define i1 @or_sgt_ult(i8 %x) {
+; CHECK-LABEL: @or_sgt_ult(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; sle
+; x <=s 13 || x == 17
+
+define i1 @or_sle_eq(i8 %x) {
+; CHECK-LABEL: @or_sle_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x != 17
+
+define i1 @or_sle_ne(i8 %x) {
+; CHECK-LABEL: @or_sle_ne(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x >=s 17
+
+define i1 @or_sle_sge(i8 %x) {
+; CHECK-LABEL: @or_sle_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x >s 17
+
+define i1 @or_sle_sgt(i8 %x) {
+; CHECK-LABEL: @or_sle_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x <=s 17
+
+define i1 @or_sle_sle(i8 %x) {
+; CHECK-LABEL: @or_sle_sle(
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x <s 17
+
+define i1 @or_sle_slt(i8 %x) {
+; CHECK-LABEL: @or_sle_slt(
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x >=u 17
+
+define i1 @or_sle_uge(i8 %x) {
+; CHECK-LABEL: @or_sle_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x >u 17
+
+define i1 @or_sle_ugt(i8 %x) {
+; CHECK-LABEL: @or_sle_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x <=u 17
+
+define i1 @or_sle_ule(i8 %x) {
+; CHECK-LABEL: @or_sle_ule(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 13 || x <u 17
+
+define i1 @or_sle_ult(i8 %x) {
+; CHECK-LABEL: @or_sle_ult(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sle i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; slt
+; x <s 13 || x == 17
+
+define i1 @or_slt_eq(i8 %x) {
+; CHECK-LABEL: @or_slt_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x != 17
+
+define i1 @or_slt_ne(i8 %x) {
+; CHECK-LABEL: @or_slt_ne(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x >=s 17
+
+define i1 @or_slt_sge(i8 %x) {
+; CHECK-LABEL: @or_slt_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x >s 17
+
+define i1 @or_slt_sgt(i8 %x) {
+; CHECK-LABEL: @or_slt_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x <=s 17
+
+define i1 @or_slt_sle(i8 %x) {
+; CHECK-LABEL: @or_slt_sle(
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x <s 17
+
+define i1 @or_slt_slt(i8 %x) {
+; CHECK-LABEL: @or_slt_slt(
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x >=u 17
+
+define i1 @or_slt_uge(i8 %x) {
+; CHECK-LABEL: @or_slt_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x >u 17
+
+define i1 @or_slt_ugt(i8 %x) {
+; CHECK-LABEL: @or_slt_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x <=u 17
+
+define i1 @or_slt_ule(i8 %x) {
+; CHECK-LABEL: @or_slt_ule(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 13 || x <u 17
+
+define i1 @or_slt_ult(i8 %x) {
+; CHECK-LABEL: @or_slt_ult(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp slt i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; uge
+; x >=u 13 || x == 17
+
+define i1 @or_uge_eq(i8 %x) {
+; CHECK-LABEL: @or_uge_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x != 17
+
+define i1 @or_uge_ne(i8 %x) {
+; CHECK-LABEL: @or_uge_ne(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x >=s 17
+
+define i1 @or_uge_sge(i8 %x) {
+; CHECK-LABEL: @or_uge_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x >s 17
+
+define i1 @or_uge_sgt(i8 %x) {
+; CHECK-LABEL: @or_uge_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x <=s 17
+
+define i1 @or_uge_sle(i8 %x) {
+; CHECK-LABEL: @or_uge_sle(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x <s 17
+
+define i1 @or_uge_slt(i8 %x) {
+; CHECK-LABEL: @or_uge_slt(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x >=u 17
+
+define i1 @or_uge_uge(i8 %x) {
+; CHECK-LABEL: @or_uge_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x >u 17
+
+define i1 @or_uge_ugt(i8 %x) {
+; CHECK-LABEL: @or_uge_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x <=u 17
+
+define i1 @or_uge_ule(i8 %x) {
+; CHECK-LABEL: @or_uge_ule(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 13 || x <u 17
+
+define i1 @or_uge_ult(i8 %x) {
+; CHECK-LABEL: @or_uge_ult(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp uge i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ugt
+; x >u 13 || x == 17
+
+define i1 @or_ugt_eq(i8 %x) {
+; CHECK-LABEL: @or_ugt_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x != 17
+
+define i1 @or_ugt_ne(i8 %x) {
+; CHECK-LABEL: @or_ugt_ne(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x >=s 17
+
+define i1 @or_ugt_sge(i8 %x) {
+; CHECK-LABEL: @or_ugt_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x >s 17
+
+define i1 @or_ugt_sgt(i8 %x) {
+; CHECK-LABEL: @or_ugt_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x <=s 17
+
+define i1 @or_ugt_sle(i8 %x) {
+; CHECK-LABEL: @or_ugt_sle(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x <s 17
+
+define i1 @or_ugt_slt(i8 %x) {
+; CHECK-LABEL: @or_ugt_slt(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x >=u 17
+
+define i1 @or_ugt_uge(i8 %x) {
+; CHECK-LABEL: @or_ugt_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x >u 17
+
+define i1 @or_ugt_ugt(i8 %x) {
+; CHECK-LABEL: @or_ugt_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 13
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x <=u 17
+
+define i1 @or_ugt_ule(i8 %x) {
+; CHECK-LABEL: @or_ugt_ule(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 13 || x <u 17
+
+define i1 @or_ugt_ult(i8 %x) {
+; CHECK-LABEL: @or_ugt_ult(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ugt i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ule
+; x <=u 13 || x == 17
+
+define i1 @or_ule_eq(i8 %x) {
+; CHECK-LABEL: @or_ule_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x != 17
+
+define i1 @or_ule_ne(i8 %x) {
+; CHECK-LABEL: @or_ule_ne(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x >=s 17
+
+define i1 @or_ule_sge(i8 %x) {
+; CHECK-LABEL: @or_ule_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x >s 17
+
+define i1 @or_ule_sgt(i8 %x) {
+; CHECK-LABEL: @or_ule_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x <=s 17
+
+define i1 @or_ule_sle(i8 %x) {
+; CHECK-LABEL: @or_ule_sle(
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x <s 17
+
+define i1 @or_ule_slt(i8 %x) {
+; CHECK-LABEL: @or_ule_slt(
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x >=u 17
+
+define i1 @or_ule_uge(i8 %x) {
+; CHECK-LABEL: @or_ule_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x >u 17
+
+define i1 @or_ule_ugt(i8 %x) {
+; CHECK-LABEL: @or_ule_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x <=u 17
+
+define i1 @or_ule_ule(i8 %x) {
+; CHECK-LABEL: @or_ule_ule(
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 13 || x <u 17
+
+define i1 @or_ule_ult(i8 %x) {
+; CHECK-LABEL: @or_ule_ult(
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ule i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ult
+; x <u 13 || x == 17
+
+define i1 @or_ult_eq(i8 %x) {
+; CHECK-LABEL: @or_ult_eq(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x != 17
+
+define i1 @or_ult_ne(i8 %x) {
+; CHECK-LABEL: @or_ult_ne(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x >=s 17
+
+define i1 @or_ult_sge(i8 %x) {
+; CHECK-LABEL: @or_ult_sge(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x >s 17
+
+define i1 @or_ult_sgt(i8 %x) {
+; CHECK-LABEL: @or_ult_sgt(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x <=s 17
+
+define i1 @or_ult_sle(i8 %x) {
+; CHECK-LABEL: @or_ult_sle(
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x <s 17
+
+define i1 @or_ult_slt(i8 %x) {
+; CHECK-LABEL: @or_ult_slt(
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x >=u 17
+
+define i1 @or_ult_uge(i8 %x) {
+; CHECK-LABEL: @or_ult_uge(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x >u 17
+
+define i1 @or_ult_ugt(i8 %x) {
+; CHECK-LABEL: @or_ult_ugt(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 13
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x <=u 17
+
+define i1 @or_ult_ule(i8 %x) {
+; CHECK-LABEL: @or_ult_ule(
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 13 || x <u 17
+
+define i1 @or_ult_ult(i8 %x) {
+; CHECK-LABEL: @or_ult_ult(
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ult i8 %x, 13
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; eq
+; x == 23 || x == 17
+
+define i1 @or_eq_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x != 17
+
+define i1 @or_eq_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_ne_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x >=s 17
+
+define i1 @or_eq_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_sge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x >s 17
+
+define i1 @or_eq_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_sgt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x <=s 17
+
+define i1 @or_eq_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x <s 17
+
+define i1 @or_eq_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x >=u 17
+
+define i1 @or_eq_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_uge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x >u 17
+
+define i1 @or_eq_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_ugt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x <=u 17
+
+define i1 @or_eq_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x == 23 || x <u 17
+
+define i1 @or_eq_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_eq_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp eq i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp eq i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ne
+; x != 23 || x == 17
+
+define i1 @or_ne_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x != 17
+
+define i1 @or_ne_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_ne_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x >=s 17
+
+define i1 @or_ne_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_sge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x >s 17
+
+define i1 @or_ne_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_sgt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x <=s 17
+
+define i1 @or_ne_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x <s 17
+
+define i1 @or_ne_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x >=u 17
+
+define i1 @or_ne_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_uge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x >u 17
+
+define i1 @or_ne_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_ugt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x <=u 17
+
+define i1 @or_ne_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x != 23 || x <u 17
+
+define i1 @or_ne_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_ne_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ne i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ne i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; sge
+; x >=s 23 || x == 17
+
+define i1 @or_sge_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x != 17
+
+define i1 @or_sge_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_ne_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x >=s 17
+
+define i1 @or_sge_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_sge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x >s 17
+
+define i1 @or_sge_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_sgt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x <=s 17
+
+define i1 @or_sge_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x <s 17
+
+define i1 @or_sge_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x >=u 17
+
+define i1 @or_sge_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_uge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x >u 17
+
+define i1 @or_sge_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_ugt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x <=u 17
+
+define i1 @or_sge_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=s 23 || x <u 17
+
+define i1 @or_sge_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_sge_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sge i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; sgt
+; x >s 23 || x == 17
+
+define i1 @or_sgt_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x != 17
+
+define i1 @or_sgt_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_ne_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x >=s 17
+
+define i1 @or_sgt_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_sge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x >s 17
+
+define i1 @or_sgt_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_sgt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x <=s 17
+
+define i1 @or_sgt_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x <s 17
+
+define i1 @or_sgt_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x >=u 17
+
+define i1 @or_sgt_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_uge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x >u 17
+
+define i1 @or_sgt_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_ugt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x <=u 17
+
+define i1 @or_sgt_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >s 23 || x <u 17
+
+define i1 @or_sgt_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_sgt_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp sgt i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; sle
+; x <=s 23 || x == 17
+
+define i1 @or_sle_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x != 17
+
+define i1 @or_sle_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_ne_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x >=s 17
+
+define i1 @or_sle_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_sge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x >s 17
+
+define i1 @or_sle_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_sgt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x <=s 17
+
+define i1 @or_sle_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x <s 17
+
+define i1 @or_sle_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x >=u 17
+
+define i1 @or_sle_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_uge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x >u 17
+
+define i1 @or_sle_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_ugt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x <=u 17
+
+define i1 @or_sle_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=s 23 || x <u 17
+
+define i1 @or_sle_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_sle_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp sle i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp sle i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; slt
+; x <s 23 || x == 17
+
+define i1 @or_slt_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x != 17
+
+define i1 @or_slt_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_ne_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x >=s 17
+
+define i1 @or_slt_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_sge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x >s 17
+
+define i1 @or_slt_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_sgt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x <=s 17
+
+define i1 @or_slt_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x <s 17
+
+define i1 @or_slt_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x >=u 17
+
+define i1 @or_slt_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_uge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x >u 17
+
+define i1 @or_slt_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_ugt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x <=u 17
+
+define i1 @or_slt_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <s 23 || x <u 17
+
+define i1 @or_slt_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_slt_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp slt i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp slt i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; uge
+; x >=u 23 || x == 17
+
+define i1 @or_uge_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x != 17
+
+define i1 @or_uge_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_ne_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x >=s 17
+
+define i1 @or_uge_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_sge_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x >s 17
+
+define i1 @or_uge_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_sgt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x <=s 17
+
+define i1 @or_uge_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x <s 17
+
+define i1 @or_uge_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x >=u 17
+
+define i1 @or_uge_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_uge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x >u 17
+
+define i1 @or_uge_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_ugt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x <=u 17
+
+define i1 @or_uge_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >=u 23 || x <u 17
+
+define i1 @or_uge_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_uge_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp uge i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp uge i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ugt
+; x >u 23 || x == 17
+
+define i1 @or_ugt_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp eq i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x != 17
+
+define i1 @or_ugt_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_ne_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ne i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x >=s 17
+
+define i1 @or_ugt_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_sge_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x >s 17
+
+define i1 @or_ugt_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_sgt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x <=s 17
+
+define i1 @or_ugt_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x <s 17
+
+define i1 @or_ugt_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x >=u 17
+
+define i1 @or_ugt_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_uge_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp uge i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x >u 17
+
+define i1 @or_ugt_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_ugt_swap(
+; CHECK-NEXT: [[B:%.*]] = icmp ugt i8 %x, 17
+; CHECK-NEXT: ret i1 [[B]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x <=u 17
+
+define i1 @or_ugt_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ule i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x >u 23 || x <u 17
+
+define i1 @or_ugt_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_ugt_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ugt i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp ult i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ugt i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ule
+; x <=u 23 || x == 17
+
+define i1 @or_ule_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x != 17
+
+define i1 @or_ule_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_ne_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x >=s 17
+
+define i1 @or_ule_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_sge_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x >s 17
+
+define i1 @or_ule_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_sgt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x <=s 17
+
+define i1 @or_ule_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x <s 17
+
+define i1 @or_ule_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x >=u 17
+
+define i1 @or_ule_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_uge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x >u 17
+
+define i1 @or_ule_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_ugt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x <=u 17
+
+define i1 @or_ule_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <=u 23 || x <u 17
+
+define i1 @or_ule_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_ule_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ule i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ule i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; ult
+; x <u 23 || x == 17
+
+define i1 @or_ult_eq_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_eq_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp eq i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x != 17
+
+define i1 @or_ult_ne_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_ne_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp ne i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x >=s 17
+
+define i1 @or_ult_sge_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_sge_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sge i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp sge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x >s 17
+
+define i1 @or_ult_sgt_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_sgt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sgt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp sgt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x <=s 17
+
+define i1 @or_ult_sle_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_sle_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp sle i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp sle i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x <s 17
+
+define i1 @or_ult_slt_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_slt_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: [[B:%.*]] = icmp slt i8 %x, 17
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp slt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x >=u 17
+
+define i1 @or_ult_uge_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_uge_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp uge i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x >u 17
+
+define i1 @or_ult_ugt_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_ugt_swap(
+; CHECK-NEXT: ret i1 true
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp ugt i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x <=u 17
+
+define i1 @or_ult_ule_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_ule_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp ule i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; x <u 23 || x <u 17
+
+define i1 @or_ult_ult_swap(i8 %x) {
+; CHECK-LABEL: @or_ult_ult_swap(
+; CHECK-NEXT: [[A:%.*]] = icmp ult i8 %x, 23
+; CHECK-NEXT: ret i1 [[A]]
+;
+ %a = icmp ult i8 %x, 23
+ %b = icmp ult i8 %x, 17
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
; Special case - slt is uge
; x <u 31 && x <s 0
diff --git a/test/Transforms/InstSimplify/shufflevector.ll b/test/Transforms/InstSimplify/shufflevector.ll
index 6af0db8e5a44..cc49ae3554c0 100644
--- a/test/Transforms/InstSimplify/shufflevector.ll
+++ b/test/Transforms/InstSimplify/shufflevector.ll
@@ -233,3 +233,17 @@ define <8 x i64> @PR30630(<8 x i64> %x) {
ret <8 x i64> %s7
}
+; This case covers internal canonicalization of shuffles with one constant input vector.
+
+;FIXME: Another issue exposed here, this whole function could be simplified to:
+; ret <2 x float> zeroinitializer
+define <2 x float> @PR32872(<2 x float> %x) {
+; CHECK-LABEL: @PR32872(
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x float> [[X:%.*]], <2 x float> zeroinitializer, <4 x i32> <i32 2, i32 2, i32 0, i32 1>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> zeroinitializer, <4 x float> [[TMP1]], <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT: ret <2 x float> [[TMP4]]
+;
+ %tmp1 = shufflevector <2 x float> %x, <2 x float> zeroinitializer, <4 x i32> <i32 2, i32 2, i32 0, i32 1>
+ %tmp4 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp1, <2 x i32> <i32 4, i32 5>
+ ret <2 x float> %tmp4
+}
diff --git a/test/Transforms/LoopIdiom/unsafe.ll b/test/Transforms/LoopIdiom/unsafe.ll
new file mode 100644
index 000000000000..8eff8996adfa
--- /dev/null
+++ b/test/Transforms/LoopIdiom/unsafe.ll
@@ -0,0 +1,55 @@
+; RUN: opt -S < %s -loop-idiom | FileCheck %s
+; CHECK-NOT: memset
+; check that memset is not generated (for stores) because that will result
+; in udiv hoisted out of the loop by the SCEV Expander
+; TODO: ideally we should be able to generate memset
+; if SCEV expander is taught to generate the dependencies
+; at the right point.
+
+@a = global i32 0, align 4
+@b = global i32 0, align 4
+@c = external local_unnamed_addr global [1 x i8], align 1
+
+define void @e() local_unnamed_addr {
+entry:
+ %d0 = load i32, i32* @a, align 4
+ %d1 = load i32, i32* @b, align 4
+ br label %for.cond1thread-pre-split
+
+for.cond1thread-pre-split: ; preds = %for.body5, %entry
+ %div = udiv i32 %d0, %d1
+ br label %for.body5
+
+for.body5: ; preds = %for.body5, %for.cond1thread-pre-split
+ %indvars.iv = phi i64 [ 0, %for.cond1thread-pre-split ], [ %indvars.iv.next, %for.body5 ]
+ %divx = sext i32 %div to i64
+ %0 = add nsw i64 %divx, %indvars.iv
+ %arrayidx = getelementptr inbounds [1 x i8], [1 x i8]* @c, i64 0, i64 %0
+ store i8 0, i8* %arrayidx, align 1
+ %indvars.iv.next = add nsw i64 %indvars.iv, 1
+ %1 = trunc i64 %indvars.iv.next to i32
+ %tobool4 = icmp eq i32 %1, 0
+ br i1 %tobool4, label %for.cond1thread-pre-split, label %for.body5
+}
+
+; The loop's trip count is depending on an unsafe operation
+; udiv. SCEV expander hoists it out of the loop, so loop-idiom
+; should check that the memset is not generated in this case.
+define void @f(i32 %a, i32 %b, i8* nocapture %x) local_unnamed_addr {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body6, %entry
+ %div = udiv i32 %a, %b
+ %conv = zext i32 %div to i64
+ br label %for.body6
+
+for.body6: ; preds = %for.body6, %for.body
+ %i.09 = phi i64 [ %inc, %for.body6 ], [ 0, %for.body ]
+ %arrayidx = getelementptr inbounds i8, i8* %x, i64 %i.09
+ store i8 0, i8* %arrayidx, align 1
+ %inc = add nuw nsw i64 %i.09, 1
+ %cmp3 = icmp slt i64 %inc, %conv
+ br i1 %cmp3, label %for.body6, label %for.body
+}
+
diff --git a/test/Transforms/LoopRotate/dbgvalue.ll b/test/Transforms/LoopRotate/dbgvalue.ll
index 9ff8bda4bc08..90105047f86a 100644
--- a/test/Transforms/LoopRotate/dbgvalue.ll
+++ b/test/Transforms/LoopRotate/dbgvalue.ll
@@ -38,7 +38,7 @@ return: ; preds = %if.end
ret i32 %z.tr, !dbg !17
}
-define i32 @tak2(i32 %x, i32 %y, i32 %z) nounwind ssp !dbg !0 {
+define i32 @tak2(i32 %x, i32 %y, i32 %z) nounwind ssp !dbg !21 {
; CHECK-LABEL: define i32 @tak2(
; CHECK: entry
; CHECK: tail call void @llvm.dbg.value(metadata i32 %x.tr
@@ -51,29 +51,29 @@ tailrecurse: ; preds = %if.then, %entry
%x.tr = phi i32 [ %x, %entry ], [ %call, %if.then ]
%y.tr = phi i32 [ %y, %entry ], [ %call9, %if.then ]
%z.tr = phi i32 [ %z, %entry ], [ %call14, %if.then ]
- %cmp = icmp slt i32 %y.tr, %x.tr, !dbg !12
- br i1 %cmp, label %if.then, label %if.end, !dbg !12
+ %cmp = icmp slt i32 %y.tr, %x.tr, !dbg !22
+ br i1 %cmp, label %if.then, label %if.end, !dbg !22
if.then: ; preds = %tailrecurse
- tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !6, metadata !DIExpression()), !dbg !7
- tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !8, metadata !DIExpression()), !dbg !9
- tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !10, metadata !DIExpression()), !dbg !11
- %sub = sub nsw i32 %x.tr, 1, !dbg !14
- %call = tail call i32 @tak(i32 %sub, i32 %y.tr, i32 %z.tr), !dbg !14
- %sub6 = sub nsw i32 %y.tr, 1, !dbg !14
- %call9 = tail call i32 @tak(i32 %sub6, i32 %z.tr, i32 %x.tr), !dbg !14
- %sub11 = sub nsw i32 %z.tr, 1, !dbg !14
- %call14 = tail call i32 @tak(i32 %sub11, i32 %x.tr, i32 %y.tr), !dbg !14
+ tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !36, metadata !DIExpression()), !dbg !37
+ tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !38, metadata !DIExpression()), !dbg !39
+ tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !40, metadata !DIExpression()), !dbg !41
+ %sub = sub nsw i32 %x.tr, 1, !dbg !24
+ %call = tail call i32 @tak(i32 %sub, i32 %y.tr, i32 %z.tr), !dbg !24
+ %sub6 = sub nsw i32 %y.tr, 1, !dbg !24
+ %call9 = tail call i32 @tak(i32 %sub6, i32 %z.tr, i32 %x.tr), !dbg !24
+ %sub11 = sub nsw i32 %z.tr, 1, !dbg !24
+ %call14 = tail call i32 @tak(i32 %sub11, i32 %x.tr, i32 %y.tr), !dbg !24
br label %tailrecurse
if.end: ; preds = %tailrecurse
- tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !6, metadata !DIExpression()), !dbg !7
- tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !8, metadata !DIExpression()), !dbg !9
- tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !10, metadata !DIExpression()), !dbg !11
- br label %return, !dbg !16
+ tail call void @llvm.dbg.value(metadata i32 %x.tr, i64 0, metadata !36, metadata !DIExpression()), !dbg !37
+ tail call void @llvm.dbg.value(metadata i32 %y.tr, i64 0, metadata !38, metadata !DIExpression()), !dbg !39
+ tail call void @llvm.dbg.value(metadata i32 %z.tr, i64 0, metadata !40, metadata !DIExpression()), !dbg !41
+ br label %return, !dbg !26
return: ; preds = %if.end
- ret i32 %z.tr, !dbg !17
+ ret i32 %z.tr, !dbg !27
}
@channelColumns = external global i64
@@ -143,3 +143,16 @@ for.end:
!17 = !DILocation(line: 37, column: 1, scope: !13)
!18 = !DIFile(filename: "/Volumes/Lalgate/cj/llvm/projects/llvm-test/SingleSource/Benchmarks/BenchmarkGame/recursive.c", directory: "/Volumes/Lalgate/cj/D/projects/llvm-test/SingleSource/Benchmarks/BenchmarkGame")
!20 = !{i32 1, !"Debug Info Version", i32 3}
+!21 = distinct !DISubprogram(name: "tak", line: 32, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: false, unit: !2, file: !18, scope: !1, type: !3)
+!22 = !DILocation(line: 33, column: 3, scope: !23)
+!23 = distinct !DILexicalBlock(line: 32, column: 30, file: !18, scope: !21)
+!24 = !DILocation(line: 34, column: 5, scope: !25)
+!25 = distinct !DILexicalBlock(line: 33, column: 14, file: !18, scope: !23)
+!26 = !DILocation(line: 36, column: 3, scope: !23)
+!27 = !DILocation(line: 37, column: 1, scope: !23)
+!36 = !DILocalVariable(name: "x", line: 32, arg: 1, scope: !21, file: !1, type: !5)
+!37 = !DILocation(line: 32, column: 13, scope: !21)
+!38 = !DILocalVariable(name: "y", line: 32, arg: 2, scope: !21, file: !1, type: !5)
+!39 = !DILocation(line: 32, column: 20, scope: !21)
+!40 = !DILocalVariable(name: "z", line: 32, arg: 3, scope: !21, file: !1, type: !5)
+!41 = !DILocation(line: 32, column: 27, scope: !21)
diff --git a/test/Transforms/SampleProfile/Inputs/indirect-call.prof b/test/Transforms/SampleProfile/Inputs/indirect-call.prof
index aaf9ec15d02e..ff7be5df977a 100644
--- a/test/Transforms/SampleProfile/Inputs/indirect-call.prof
+++ b/test/Transforms/SampleProfile/Inputs/indirect-call.prof
@@ -1,19 +1,19 @@
test:63067:0
- 4: 3345 _Z3barv:1398 _Z3foov:2059
+ 1: 3345 _Z3barv:1398 _Z3foov:2059
test_inline:3000:0
- 5: foo_inline1:3000
- 1: 3000
- 5: foo_inline2:4000
- 1: 4000
+ 1: foo_inline1:3000
+ 11: 3000
+ 1: foo_inline2:4000
+ 19: 4000
test_noinline:3000:0
- 5: foo_noinline:3000
- 1: 3000
+ 1: foo_noinline:3000
+ 20: 3000
test_direct:3000:0
- 5: foo_direct:3000
- 1: 3000
+ 1: foo_direct:3000
+ 21: 3000
test_inline_strip:3000:0
- 5: foo_inline_strip:3000
+ 1: foo_inline_strip:3000
1: 3000
-test_inline_strip_confilict:3000:0
- 5: foo_inline_strip_conflict:3000
+test_inline_strip_conflict:3000:0
+ 1: foo_inline_strip_conflict:3000
1: 3000
diff --git a/test/Transforms/SampleProfile/indirect-call.ll b/test/Transforms/SampleProfile/indirect-call.ll
index 4647dd421299..4101f6f492e5 100644
--- a/test/Transforms/SampleProfile/indirect-call.ll
+++ b/test/Transforms/SampleProfile/indirect-call.ll
@@ -12,7 +12,7 @@ define void @test(void ()*) !dbg !3 {
; CHECK-LABEL: @test_inline
; If the indirect call is promoted and inlined in profile, we should promote and inline it.
-define void @test_inline(i64* (i32*)*, i32* %x) !dbg !3 {
+define void @test_inline(i64* (i32*)*, i32* %x) !dbg !6 {
%2 = alloca i64* (i32*)*
store i64* (i32*)* %0, i64* (i32*)** %2
%3 = load i64* (i32*)*, i64* (i32*)** %2
@@ -25,14 +25,14 @@ define void @test_inline(i64* (i32*)*, i32* %x) !dbg !3 {
; CHECK-NOT: call
; CHECK: if.false.orig_indirect2:
; CHECK: call
- call i64* %3(i32* %x), !dbg !5
+ call i64* %3(i32* %x), !dbg !7
ret void
}
; CHECK-LABEL: @test_inline_strip
; If the indirect call is promoted and inlined in profile, and the callee name
; is stripped we should promote and inline it.
-define void @test_inline_strip(i64* (i32*)*, i32* %x) !dbg !3 {
+define void @test_inline_strip(i64* (i32*)*, i32* %x) !dbg !8 {
%2 = alloca i64* (i32*)*
store i64* (i32*)* %0, i64* (i32*)** %2
%3 = load i64* (i32*)*, i64* (i32*)** %2
@@ -41,74 +41,74 @@ define void @test_inline_strip(i64* (i32*)*, i32* %x) !dbg !3 {
; CHECK-NOT: call
; CHECK: if.false.orig_indirect:
; CHECK: call
- call i64* %3(i32* %x), !dbg !5
+ call i64* %3(i32* %x), !dbg !9
ret void
}
; CHECK-LABEL: @test_inline_strip_conflict
; If the indirect call is promoted and inlined in profile, and the callee name
; is stripped, but have more than 1 potential match, we should not promote.
-define void @test_inline_strip_conflict(i64* (i32*)*, i32* %x) !dbg !3 {
+define void @test_inline_strip_conflict(i64* (i32*)*, i32* %x) !dbg !10 {
%2 = alloca i64* (i32*)*
store i64* (i32*)* %0, i64* (i32*)** %2
%3 = load i64* (i32*)*, i64* (i32*)** %2
; CHECK-NOT: if.true.direct_targ:
- call i64* %3(i32* %x), !dbg !5
+ call i64* %3(i32* %x), !dbg !11
ret void
}
; CHECK-LABEL: @test_noinline
; If the indirect call target is not available, we should not promote it.
-define void @test_noinline(void ()*) !dbg !3 {
+define void @test_noinline(void ()*) !dbg !12 {
%2 = alloca void ()*
store void ()* %0, void ()** %2
%3 = load void ()*, void ()** %2
; CHECK-NOT: icmp
; CHECK: call
- call void %3(), !dbg !5
+ call void %3(), !dbg !13
ret void
}
@x = global i32 0, align 4
-define i32* @foo_inline1(i32* %x) !dbg !3 {
+define i32* @foo_inline1(i32* %x) !dbg !14 {
ret i32* %x
}
-define i32* @foo_inline_strip.suffix(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip.suffix(i32* %x) !dbg !15 {
ret i32* %x
}
-define i32* @foo_inline_strip_conflict.suffix1(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip_conflict.suffix1(i32* %x) !dbg !16 {
ret i32* %x
}
-define i32* @foo_inline_strip_conflict.suffix2(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip_conflict.suffix2(i32* %x) !dbg !17 {
ret i32* %x
}
-define i32* @foo_inline_strip_conflict.suffix3(i32* %x) !dbg !3 {
+define i32* @foo_inline_strip_conflict.suffix3(i32* %x) !dbg !18 {
ret i32* %x
}
-define i32* @foo_inline2(i32* %x) !dbg !3 {
+define i32* @foo_inline2(i32* %x) !dbg !19 {
ret i32* %x
}
-define i32 @foo_noinline(i32 %x) !dbg !3 {
+define i32 @foo_noinline(i32 %x) !dbg !20 {
ret i32 %x
}
-define void @foo_direct() !dbg !3 {
+define void @foo_direct() !dbg !21 {
ret void
}
; CHECK-LABEL: @test_direct
; We should not promote a direct call.
-define void @test_direct() !dbg !3 {
+define void @test_direct() !dbg !22 {
; CHECK-NOT: icmp
; CHECK: call
- call void @foo_alias(), !dbg !5
+ call void @foo_alias(), !dbg !23
ret void
}
@@ -120,7 +120,25 @@ define void @test_direct() !dbg !3 {
!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1)
!1 = !DIFile(filename: "test.cc", directory: "/")
!2 = !{i32 2, !"Debug Info Version", i32 3}
-!3 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 1, unit: !0)
-!4 = !DILocation(line: 5, scope: !3)
+!3 = distinct !DISubprogram(name: "test", scope: !1, file: !1, line: 3, unit: !0)
+!4 = !DILocation(line: 4, scope: !3)
!5 = !DILocation(line: 6, scope: !3)
; CHECK: ![[PROF]] = !{!"VP", i32 0, i64 3457, i64 9191153033785521275, i64 2059, i64 -1069303473483922844, i64 1398}
+!6 = distinct !DISubprogram(name: "test_inline", scope: !1, file: !1, line: 6, unit: !0)
+!7 = !DILocation(line: 7, scope: !6)
+!8 = distinct !DISubprogram(name: "test_inline_strip", scope: !1, file: !1, line: 8, unit: !0)
+!9 = !DILocation(line: 9, scope: !8)
+!10 = distinct !DISubprogram(name: "test_inline_strip_conflict", scope: !1, file: !1, line: 10, unit: !0)
+!11 = !DILocation(line: 11, scope: !10)
+!12 = distinct !DISubprogram(name: "test_noinline", scope: !1, file: !1, line: 12, unit: !0)
+!13 = !DILocation(line: 13, scope: !12)
+!14 = distinct !DISubprogram(name: "foo_inline1", scope: !1, file: !1, line: 11, unit: !0)
+!15 = distinct !DISubprogram(name: "foo_inline_strip.suffix", scope: !1, file: !1, line: 1, unit: !0)
+!16 = distinct !DISubprogram(name: "foo_inline_strip_conflict.suffix1", scope: !1, file: !1, line: 1, unit: !0)
+!17 = distinct !DISubprogram(name: "foo_inline_strip_conflict.suffix2", scope: !1, file: !1, line: 1, unit: !0)
+!18 = distinct !DISubprogram(name: "foo_inline_strip_conflict.suffix3", scope: !1, file: !1, line: 1, unit: !0)
+!19 = distinct !DISubprogram(name: "foo_inline2", scope: !1, file: !1, line: 19, unit: !0)
+!20 = distinct !DISubprogram(name: "foo_noinline", scope: !1, file: !1, line: 20, unit: !0)
+!21 = distinct !DISubprogram(name: "foo_direct", scope: !1, file: !1, line: 21, unit: !0)
+!22 = distinct !DISubprogram(name: "test_direct", scope: !1, file: !1, line: 22, unit: !0)
+!23 = !DILocation(line: 23, scope: !22)
diff --git a/test/Unit/lit.cfg b/test/Unit/lit.cfg
index 30a5d3fab826..dac0bf829ba6 100644
--- a/test/Unit/lit.cfg
+++ b/test/Unit/lit.cfg
@@ -43,6 +43,10 @@ if sys.platform in ['win32', 'cygwin'] and os.path.isdir(config.shlibdir):
config.environment['PATH'] = os.path.pathsep.join((
config.shlibdir, config.environment['PATH']))
+# Win32 may use %SYSTEMDRIVE% during file system shell operations, so propogate.
+if sys.platform == 'win32' and 'SYSTEMDRIVE' in os.environ:
+ config.environment['SYSTEMDRIVE'] = os.environ['SYSTEMDRIVE']
+
###
# Check that the object root is known.
diff --git a/test/tools/llvm-objdump/WebAssembly/symbol-table.test b/test/tools/llvm-objdump/WebAssembly/symbol-table.test
new file mode 100644
index 000000000000..8936c7a12e4c
--- /dev/null
+++ b/test/tools/llvm-objdump/WebAssembly/symbol-table.test
@@ -0,0 +1,8 @@
+RUN: llvm-objdump -t %p/../Inputs/test.wasm | FileCheck %s
+
+CHECK: SYMBOL TABLE:
+CHECK: 00000000 l F IMPORT bar
+CHECK: 00000000 g F EXPORT baz
+CHECK: 00000001 g F EXPORT quux
+CHECK: 00000000 l F name $import
+CHECK: 00000001 l F name $func0
diff --git a/test/tools/llvm-readobj/Inputs/resources/cursor_small.bmp b/test/tools/llvm-readobj/Inputs/resources/cursor_small.bmp
new file mode 100644
index 000000000000..ce513261bc2c
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/cursor_small.bmp
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/resources/okay_small.bmp b/test/tools/llvm-readobj/Inputs/resources/okay_small.bmp
new file mode 100644
index 000000000000..e4005bf5ef97
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/okay_small.bmp
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/resources/test_resource.obj.coff b/test/tools/llvm-readobj/Inputs/resources/test_resource.obj.coff
new file mode 100644
index 000000000000..b9a7908b1c5c
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/test_resource.obj.coff
Binary files differ
diff --git a/test/tools/llvm-readobj/Inputs/resources/test_resource.rc b/test/tools/llvm-readobj/Inputs/resources/test_resource.rc
new file mode 100644
index 000000000000..fd616520dbe1
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/test_resource.rc
@@ -0,0 +1,44 @@
+#include "windows.h"
+
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
+
+myaccelerators ACCELERATORS
+{
+ "^C", 999, VIRTKEY, ALT
+ "D", 1100, VIRTKEY, CONTROL, SHIFT
+ "^R", 444, ASCII, NOINVERT
+}
+
+cursor BITMAP "cursor_small.bmp"
+okay BITMAP "okay_small.bmp"
+
+14432 MENU
+LANGUAGE LANG_CHINESE, SUBLANG_CHINESE_SIMPLIFIED
+{
+ MENUITEM "yu", 100
+ MENUITEM "shala", 101
+ MENUITEM "kaoya", 102
+}
+
+testdialog DIALOG 10, 10, 200, 300
+STYLE WS_POPUP | WS_BORDER
+CAPTION "Test"
+{
+ CTEXT "Continue:", 1, 10, 10, 230, 14
+ PUSHBUTTON "&OK", 2, 66, 134, 161, 13
+}
+
+12 ACCELERATORS
+{
+ "X", 164, VIRTKEY, ALT
+ "H", 5678, VIRTKEY, CONTROL, SHIFT
+ "^R", 444, ASCII, NOINVERT
+}
+
+"eat" MENU
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_AUS
+{
+ MENUITEM "fish", 100
+ MENUITEM "salad", 101
+ MENUITEM "duck", 102
+}
diff --git a/test/tools/llvm-readobj/Inputs/resources/test_resource.res b/test/tools/llvm-readobj/Inputs/resources/test_resource.res
new file mode 100644
index 000000000000..c577ecc3d633
--- /dev/null
+++ b/test/tools/llvm-readobj/Inputs/resources/test_resource.res
Binary files differ
diff --git a/test/tools/llvm-readobj/resources.test b/test/tools/llvm-readobj/resources.test
index 46ee8b99a65d..855ce5393b84 100644
--- a/test/tools/llvm-readobj/resources.test
+++ b/test/tools/llvm-readobj/resources.test
@@ -1,19 +1,111 @@
-RUN: llvm-readobj -coff-resources %p/Inputs/zero-string-table.obj.coff-i386 \
-RUN: | FileCheck %s -check-prefix RESOURCE
+// Check dumping of the .rsrc section(s)
+// The input was generated with the following commands, using the original Windows
+// rc.exe and cvtres.exe:
+// > rc /fo test_resource.res /nologo test_resource.rc
+// > cvtres /machine:X86 /readonly /nologo /out:test_resource.o test_resource.res
-RESOURCE: Resources [
-RESOURCE-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
-RESOURCE-NEXT: .rsrc$01 Data (
-RESOURCE-NEXT: 0000: 00000000 00000000 00000000 00000100 |................|
-RESOURCE-NEXT: 0010: 06000000 18000080 00000000 00000000 |................|
-RESOURCE-NEXT: 0020: 00000000 00000100 01000000 30000080 |............0...|
-RESOURCE-NEXT: 0030: 00000000 00000000 00000000 00000100 |................|
-RESOURCE-NEXT: 0040: 09040000 48000000 00000000 2A000000 |....H.......*...|
-RESOURCE-NEXT: 0050: 00000000 00000000 |........|
-RESOURCE-NEXT: )
-RESOURCE-NEXT: .rsrc$02 Data (
-RESOURCE-NEXT: 0000: 00000500 48006500 6C006C00 6F000000 |....H.e.l.l.o...|
-RESOURCE-NEXT: 0010: 00000000 00000000 00000000 00000000 |................|
-RESOURCE-NEXT: 0020: 00000000 00000000 00000000 00000000 |................|
-RESOURCE-NEXT: )
-RESOURCE-NEXT: ]
+RUN: llvm-readobj -coff-resources -section-data %p/Inputs/zero-string-table.obj.coff-i386 \
+RUN: | FileCheck %s -check-prefix ZERO
+RUN: llvm-readobj -coff-resources %p/Inputs/resources/test_resource.obj.coff \
+RUN: | FileCheck %s -check-prefix TEST_RES
+
+ZERO: Resources [
+ZERO-NEXT: String Name Entries: 0
+ZERO-NEXT: ID Entries: 1
+ZERO-NEXT: Type: kRT_STRING (ID 6) [
+ZERO-NEXT: String Name Entries: 0
+ZERO-NEXT: ID Entries: 1
+ZERO-NEXT: Name: (ID 1) [
+ZERO-NEXT: String Name Entries: 0
+ZERO-NEXT: ID Entries: 1
+ZERO-NEXT: Language: (ID 1033) [
+ZERO-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+ZERO-NEXT: Major Version: 0
+ZERO-NEXT: Minor Version: 0
+ZERO-NEXT: ]
+ZERO-NEXT: ]
+ZERO-NEXT: ]
+
+
+TEST_RES: Resources [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 4
+TEST_RES-NEXT: Type: kRT_BITMAP (ID 2) [
+TEST_RES-NEXT: String Name Entries: 2
+TEST_RES-NEXT: ID Entries: 0
+TEST_RES-NEXT: Name: CURSOR [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Language: (ID 1033) [
+TEST_RES-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT: Major Version: 0
+TEST_RES-NEXT: Minor Version: 0
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: Name: OKAY [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Language: (ID 1033) [
+TEST_RES-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT: Major Version: 0
+TEST_RES-NEXT: Minor Version: 0
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: Type: kRT_MENU (ID 4) [
+TEST_RES-NEXT: String Name Entries: 1
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Name: "EAT" [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Language: (ID 3081) [
+TEST_RES-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT: Major Version: 0
+TEST_RES-NEXT: Minor Version: 0
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: Name: (ID 14432) [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Language: (ID 2052) [
+TEST_RES-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT: Major Version: 0
+TEST_RES-NEXT: Minor Version: 0
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: Type: kRT_DIALOG (ID 5) [
+TEST_RES-NEXT: String Name Entries: 1
+TEST_RES-NEXT: ID Entries: 0
+TEST_RES-NEXT: Name: TESTDIALOG [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Language: (ID 1033) [
+TEST_RES-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT: Major Version: 0
+TEST_RES-NEXT: Minor Version: 0
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: Type: kRT_ACCELERATOR (ID 9) [
+TEST_RES-NEXT: String Name Entries: 1
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Name: MYACCELERATORS [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Language: (ID 1033) [
+TEST_RES-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT: Major Version: 0
+TEST_RES-NEXT: Minor Version: 0
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: Name: (ID 12) [
+TEST_RES-NEXT: String Name Entries: 0
+TEST_RES-NEXT: ID Entries: 1
+TEST_RES-NEXT: Language: (ID 1033) [
+TEST_RES-NEXT: Time/Date Stamp: 1970-01-01 00:00:00 (0x0)
+TEST_RES-NEXT: Major Version: 0
+TEST_RES-NEXT: Minor Version: 0
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
+TEST_RES-NEXT: ]
diff --git a/tools/llvm-link/llvm-link.cpp b/tools/llvm-link/llvm-link.cpp
index 27199d53538e..568e5f8d2d58 100644
--- a/tools/llvm-link/llvm-link.cpp
+++ b/tools/llvm-link/llvm-link.cpp
@@ -300,7 +300,7 @@ static bool linkFiles(const char *argv0, LLVMContext &Context, Linker &L,
// does not do the ThinLink that would normally determine what values to
// promote.
for (auto &I : *Index) {
- for (auto &S : I.second) {
+ for (auto &S : I.second.SummaryList) {
if (GlobalValue::isLocalLinkage(S->linkage()))
S->setLinkage(GlobalValue::ExternalLinkage);
}
diff --git a/tools/llvm-lto/llvm-lto.cpp b/tools/llvm-lto/llvm-lto.cpp
index 27e5c5e122c2..2458d3d123ca 100644
--- a/tools/llvm-lto/llvm-lto.cpp
+++ b/tools/llvm-lto/llvm-lto.cpp
@@ -284,7 +284,7 @@ void printIndexStats() {
unsigned Calls = 0, Refs = 0, Functions = 0, Alias = 0, Globals = 0;
for (auto &Summaries : *Index) {
- for (auto &Summary : Summaries.second) {
+ for (auto &Summary : Summaries.second.SummaryList) {
Refs += Summary->refs().size();
if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
Functions++;
diff --git a/tools/llvm-pdbdump/Analyze.cpp b/tools/llvm-pdbdump/Analyze.cpp
index b65dd40d25ff..f7d6ec53b030 100644
--- a/tools/llvm-pdbdump/Analyze.cpp
+++ b/tools/llvm-pdbdump/Analyze.cpp
@@ -74,7 +74,7 @@ Error AnalysisStyle::dump() {
if (!Tpi)
return Tpi.takeError();
- TypeDatabase TypeDB;
+ TypeDatabase TypeDB(Tpi->getNumTypeRecords());
TypeDatabaseVisitor DBV(TypeDB);
TypeDeserializer Deserializer;
TypeVisitorCallbackPipeline Pipeline;
diff --git a/tools/llvm-pdbdump/LLVMOutputStyle.cpp b/tools/llvm-pdbdump/LLVMOutputStyle.cpp
index ec1325ff2335..2dd4ef0fb30d 100644
--- a/tools/llvm-pdbdump/LLVMOutputStyle.cpp
+++ b/tools/llvm-pdbdump/LLVMOutputStyle.cpp
@@ -39,6 +39,7 @@
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/PublicsStream.h"
#include "llvm/DebugInfo/PDB/Native/RawError.h"
+#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"
#include "llvm/DebugInfo/PDB/Native/TpiStream.h"
#include "llvm/DebugInfo/PDB/PDBExtras.h"
#include "llvm/Object/COFF.h"
@@ -609,11 +610,8 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
VerLabel = "IPI Version";
}
- bool IsSilentDatabaseBuild = !DumpRecordBytes && !DumpRecords && !DumpTpiHash;
- if (IsSilentDatabaseBuild) {
- outs().flush();
- errs() << "Building Type Information For " << Label << "\n";
- }
+ if (!DumpRecordBytes && !DumpRecords && !DumpTpiHash)
+ return Error::success();
auto Tpi = (StreamIdx == StreamTPI) ? File.getPDBTpiStream()
: File.getPDBIpiStream();
@@ -623,38 +621,43 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
std::unique_ptr<DictScope> StreamScope;
std::unique_ptr<ListScope> RecordScope;
- if (!IsSilentDatabaseBuild) {
- StreamScope = llvm::make_unique<DictScope>(P, Label);
- P.printNumber(VerLabel, Tpi->getTpiVersion());
- P.printNumber("Record count", Tpi->NumTypeRecords());
- }
-
- TypeDatabase &StreamDB = (StreamIdx == StreamTPI) ? TypeDB : ItemDB;
+ StreamScope = llvm::make_unique<DictScope>(P, Label);
+ P.printNumber(VerLabel, Tpi->getTpiVersion());
+ P.printNumber("Record count", Tpi->getNumTypeRecords());
- TypeDatabaseVisitor DBV(StreamDB);
- CompactTypeDumpVisitor CTDV(StreamDB, &P);
- TypeDumpVisitor TDV(TypeDB, &P, false);
- if (StreamIdx == StreamIPI)
- TDV.setItemDB(ItemDB);
- RecordBytesVisitor RBV(P);
- TypeDeserializer Deserializer;
+ Optional<TypeDatabase> &StreamDB = (StreamIdx == StreamTPI) ? TypeDB : ItemDB;
- // We always need to deserialize and add it to the type database. This is
- // true if even if we're not dumping anything, because we could need the
- // type database for the purposes of dumping symbols.
- TypeVisitorCallbackPipeline Pipeline;
- Pipeline.addCallbackToPipeline(Deserializer);
- Pipeline.addCallbackToPipeline(DBV);
+ std::vector<std::unique_ptr<TypeVisitorCallbacks>> Visitors;
+ Visitors.push_back(make_unique<TypeDeserializer>());
+ if (!StreamDB.hasValue()) {
+ StreamDB.emplace(Tpi->getNumTypeRecords());
+ Visitors.push_back(make_unique<TypeDatabaseVisitor>(*StreamDB));
+ }
// If we're in dump mode, add a dumper with the appropriate detail level.
if (DumpRecords) {
+ std::unique_ptr<TypeVisitorCallbacks> Dumper;
if (opts::raw::CompactRecords)
- Pipeline.addCallbackToPipeline(CTDV);
- else
- Pipeline.addCallbackToPipeline(TDV);
+ Dumper = make_unique<CompactTypeDumpVisitor>(*StreamDB, &P);
+ else {
+ assert(TypeDB.hasValue());
+
+ auto X = make_unique<TypeDumpVisitor>(*TypeDB, &P, false);
+ if (StreamIdx == StreamIPI)
+ X->setItemDB(*ItemDB);
+ Dumper = std::move(X);
+ }
+ Visitors.push_back(std::move(Dumper));
}
if (DumpRecordBytes)
- Pipeline.addCallbackToPipeline(RBV);
+ Visitors.push_back(make_unique<RecordBytesVisitor>(P));
+
+ // We always need to deserialize and add it to the type database. This is
+ // true even if we're not dumping anything, because we could need the
+ // type database for the purposes of dumping symbols.
+ TypeVisitorCallbackPipeline Pipeline;
+ for (const auto &V : Visitors)
+ Pipeline.addCallbackToPipeline(*V);
CVTypeVisitor Visitor(Pipeline);
@@ -680,7 +683,7 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
if (DumpTpiHash) {
DictScope DD(P, "Hash");
- P.printNumber("Number of Hash Buckets", Tpi->NumHashBuckets());
+ P.printNumber("Number of Hash Buckets", Tpi->getNumHashBuckets());
P.printNumber("Hash Key Size", Tpi->getHashKeySize());
P.printList("Values", Tpi->getHashValues());
@@ -700,19 +703,51 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
}
}
- if (!IsSilentDatabaseBuild) {
- ListScope L(P, "TypeIndexOffsets");
- for (const auto &IO : Tpi->getTypeIndexOffsets()) {
- P.printString(formatv("Index: {0:x}, Offset: {1:N}", IO.Type.getIndex(),
- (uint32_t)IO.Offset)
- .str());
- }
+ ListScope L(P, "TypeIndexOffsets");
+ for (const auto &IO : Tpi->getTypeIndexOffsets()) {
+ P.printString(formatv("Index: {0:x}, Offset: {1:N}", IO.Type.getIndex(),
+ (uint32_t)IO.Offset)
+ .str());
}
P.flush();
return Error::success();
}
+Error LLVMOutputStyle::buildTypeDatabase(uint32_t SN) {
+ assert(SN == StreamIPI || SN == StreamTPI);
+
+ auto &DB = (SN == StreamIPI) ? ItemDB : TypeDB;
+
+ if (DB.hasValue())
+ return Error::success();
+
+ auto Tpi =
+ (SN == StreamTPI) ? File.getPDBTpiStream() : File.getPDBIpiStream();
+
+ if (!Tpi)
+ return Tpi.takeError();
+
+ DB.emplace(Tpi->getNumTypeRecords());
+
+ TypeVisitorCallbackPipeline Pipeline;
+ TypeDeserializer Deserializer;
+ TypeDatabaseVisitor DBV(*DB);
+ Pipeline.addCallbackToPipeline(Deserializer);
+ Pipeline.addCallbackToPipeline(DBV);
+
+ auto HashValues = Tpi->getHashValues();
+ std::unique_ptr<TpiHashVerifier> HashVerifier;
+ if (!HashValues.empty()) {
+ HashVerifier =
+ make_unique<TpiHashVerifier>(HashValues, Tpi->getNumHashBuckets());
+ Pipeline.addCallbackToPipeline(*HashVerifier);
+ }
+
+ CVTypeVisitor Visitor(Pipeline);
+ return Visitor.visitTypeStream(Tpi->types(nullptr));
+}
+
Error LLVMOutputStyle::dumpDbiStream() {
bool DumpModules = opts::raw::DumpModules || opts::raw::DumpModuleSyms ||
opts::raw::DumpModuleFiles || opts::raw::DumpLineInfo;
@@ -750,43 +785,46 @@ Error LLVMOutputStyle::dumpDbiStream() {
if (DumpModules) {
ListScope L(P, "Modules");
- for (auto &Modi : DS->modules()) {
+ const DbiModuleList &Modules = DS->modules();
+ for (uint32_t I = 0; I < Modules.getModuleCount(); ++I) {
+ const DbiModuleDescriptor &Modi = Modules.getModuleDescriptor(I);
DictScope DD(P);
- P.printString("Name", Modi.Info.getModuleName().str());
- P.printNumber("Debug Stream Index", Modi.Info.getModuleStreamIndex());
- P.printString("Object File Name", Modi.Info.getObjFileName().str());
- P.printNumber("Num Files", Modi.Info.getNumberOfFiles());
- P.printNumber("Source File Name Idx", Modi.Info.getSourceFileNameIndex());
- P.printNumber("Pdb File Name Idx", Modi.Info.getPdbFilePathNameIndex());
- P.printNumber("Line Info Byte Size", Modi.Info.getC11LineInfoByteSize());
- P.printNumber("C13 Line Info Byte Size",
- Modi.Info.getC13LineInfoByteSize());
- P.printNumber("Symbol Byte Size", Modi.Info.getSymbolDebugInfoByteSize());
- P.printNumber("Type Server Index", Modi.Info.getTypeServerIndex());
- P.printBoolean("Has EC Info", Modi.Info.hasECInfo());
+ P.printString("Name", Modi.getModuleName().str());
+ P.printNumber("Debug Stream Index", Modi.getModuleStreamIndex());
+ P.printString("Object File Name", Modi.getObjFileName().str());
+ P.printNumber("Num Files", Modi.getNumberOfFiles());
+ P.printNumber("Source File Name Idx", Modi.getSourceFileNameIndex());
+ P.printNumber("Pdb File Name Idx", Modi.getPdbFilePathNameIndex());
+ P.printNumber("Line Info Byte Size", Modi.getC11LineInfoByteSize());
+ P.printNumber("C13 Line Info Byte Size", Modi.getC13LineInfoByteSize());
+ P.printNumber("Symbol Byte Size", Modi.getSymbolDebugInfoByteSize());
+ P.printNumber("Type Server Index", Modi.getTypeServerIndex());
+ P.printBoolean("Has EC Info", Modi.hasECInfo());
if (opts::raw::DumpModuleFiles) {
- std::string FileListName =
- to_string(Modi.SourceFiles.size()) + " Contributing Source Files";
+ std::string FileListName = to_string(Modules.getSourceFileCount(I)) +
+ " Contributing Source Files";
ListScope LL(P, FileListName);
- for (auto File : Modi.SourceFiles)
- P.printString(File.str());
+ for (auto File : Modules.source_files(I))
+ P.printString(File);
}
- bool HasModuleDI =
- (Modi.Info.getModuleStreamIndex() < File.getNumStreams());
+ bool HasModuleDI = (Modi.getModuleStreamIndex() < File.getNumStreams());
bool ShouldDumpSymbols =
(opts::raw::DumpModuleSyms || opts::raw::DumpSymRecordBytes);
if (HasModuleDI && (ShouldDumpSymbols || opts::raw::DumpLineInfo)) {
auto ModStreamData = MappedBlockStream::createIndexedStream(
File.getMsfLayout(), File.getMsfBuffer(),
- Modi.Info.getModuleStreamIndex());
+ Modi.getModuleStreamIndex());
- ModuleDebugStreamRef ModS(Modi.Info, std::move(ModStreamData));
+ ModuleDebugStreamRef ModS(Modi, std::move(ModStreamData));
if (auto EC = ModS.reload())
return EC;
if (ShouldDumpSymbols) {
+ if (auto EC = buildTypeDatabase(StreamTPI))
+ return EC;
+
ListScope SS(P, "Symbols");
- codeview::CVSymbolDumper SD(P, TypeDB, nullptr, false);
+ codeview::CVSymbolDumper SD(P, *TypeDB, nullptr, false);
bool HadError = false;
for (auto S : ModS.symbols(&HadError)) {
DictScope LL(P, "");
@@ -807,8 +845,10 @@ Error LLVMOutputStyle::dumpDbiStream() {
}
if (opts::raw::DumpLineInfo) {
ListScope SS(P, "LineInfo");
+ if (auto EC = buildTypeDatabase(StreamIPI))
+ return EC;
- C13RawVisitor V(P, File, ItemDB);
+ C13RawVisitor V(P, File, *ItemDB);
if (auto EC = codeview::visitModuleDebugFragments(
ModS.linesAndChecksums(), V))
return EC;
@@ -846,9 +886,10 @@ Error LLVMOutputStyle::dumpSectionContribs() {
{
DictScope DD(P, "Module");
P.printNumber("Index", SC.Imod);
- auto M = DS.modules();
- if (M.size() > SC.Imod) {
- P.printString("Name", M[SC.Imod].Info.getModuleName());
+ const DbiModuleList &Modules = DS.modules();
+ if (Modules.getModuleCount() > SC.Imod) {
+ P.printString("Name",
+ Modules.getModuleDescriptor(SC.Imod).getModuleName());
}
}
P.printNumber("Data CRC", SC.DataCrc);
@@ -925,7 +966,10 @@ Error LLVMOutputStyle::dumpPublicsStream() {
P.printList("Section Offsets", Publics->getSectionOffsets(),
printSectionOffset);
ListScope L(P, "Symbols");
- codeview::CVSymbolDumper SD(P, TypeDB, nullptr, false);
+ if (auto EC = buildTypeDatabase(StreamTPI))
+ return EC;
+
+ codeview::CVSymbolDumper SD(P, *TypeDB, nullptr, false);
bool HadError = false;
for (auto S : Publics->getSymbols(&HadError)) {
DictScope DD(P, "");
diff --git a/tools/llvm-pdbdump/LLVMOutputStyle.h b/tools/llvm-pdbdump/LLVMOutputStyle.h
index bfff3b8308db..b0e7e3406b36 100644
--- a/tools/llvm-pdbdump/LLVMOutputStyle.h
+++ b/tools/llvm-pdbdump/LLVMOutputStyle.h
@@ -12,6 +12,7 @@
#include "OutputStyle.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -28,6 +29,8 @@ public:
Error dump() override;
private:
+ Error buildTypeDatabase(uint32_t SN);
+
Error dumpFileHeaders();
Error dumpStreamSummary();
Error dumpFreePageMap();
@@ -51,8 +54,8 @@ private:
PDBFile &File;
ScopedPrinter P;
- codeview::TypeDatabase TypeDB;
- codeview::TypeDatabase ItemDB;
+ Optional<codeview::TypeDatabase> TypeDB;
+ Optional<codeview::TypeDatabase> ItemDB;
SmallVector<std::string, 32> StreamPurposes;
};
}
diff --git a/tools/llvm-pdbdump/StreamUtil.cpp b/tools/llvm-pdbdump/StreamUtil.cpp
index 6577702adac8..81aa256b5002 100644
--- a/tools/llvm-pdbdump/StreamUtil.cpp
+++ b/tools/llvm-pdbdump/StreamUtil.cpp
@@ -12,6 +12,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/DbiStream.h"
#include "llvm/DebugInfo/PDB/Native/InfoStream.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
@@ -30,14 +31,16 @@ void discoverStreamPurposes(PDBFile &File,
auto Info = File.getPDBInfoStream();
uint32_t StreamCount = File.getNumStreams();
- DenseMap<uint16_t, const ModuleInfoEx *> ModStreams;
+ DenseMap<uint16_t, DbiModuleDescriptor> ModStreams;
DenseMap<uint16_t, std::string> NamedStreams;
if (Dbi) {
- for (auto &ModI : Dbi->modules()) {
- uint16_t SN = ModI.Info.getModuleStreamIndex();
+ const DbiModuleList &Modules = Dbi->modules();
+ for (uint32_t I = 0; I < Modules.getModuleCount(); ++I) {
+ DbiModuleDescriptor Descriptor = Modules.getModuleDescriptor(I);
+ uint16_t SN = Descriptor.getModuleStreamIndex();
if (SN != kInvalidStreamIndex)
- ModStreams[SN] = &ModI;
+ ModStreams[SN] = Descriptor;
}
}
if (Info) {
@@ -109,7 +112,7 @@ void discoverStreamPurposes(PDBFile &File,
auto NSIter = NamedStreams.find(StreamIdx);
if (ModIter != ModStreams.end()) {
Value = "Module \"";
- Value += ModIter->second->Info.getModuleName().str();
+ Value += ModIter->second.getModuleName();
Value += "\"";
} else if (NSIter != NamedStreams.end()) {
Value = "Named Stream \"";
diff --git a/tools/llvm-pdbdump/YAMLOutputStyle.cpp b/tools/llvm-pdbdump/YAMLOutputStyle.cpp
index b94b5a4abf37..0573b23cdc76 100644
--- a/tools/llvm-pdbdump/YAMLOutputStyle.cpp
+++ b/tools/llvm-pdbdump/YAMLOutputStyle.cpp
@@ -305,23 +305,28 @@ Error YAMLOutputStyle::dumpDbiStream() {
Obj.DbiStream->PdbDllVersion = DS.getPdbDllVersion();
Obj.DbiStream->VerHeader = DS.getDbiVersion();
if (opts::pdb2yaml::DbiModuleInfo) {
- for (const auto &MI : DS.modules()) {
+ const auto &Modules = DS.modules();
+ for (uint32_t I = 0; I < Modules.getModuleCount(); ++I) {
+ DbiModuleDescriptor MI = Modules.getModuleDescriptor(I);
+
Obj.DbiStream->ModInfos.emplace_back();
yaml::PdbDbiModuleInfo &DMI = Obj.DbiStream->ModInfos.back();
- DMI.Mod = MI.Info.getModuleName();
- DMI.Obj = MI.Info.getObjFileName();
- if (opts::pdb2yaml::DbiModuleSourceFileInfo)
- DMI.SourceFiles = MI.SourceFiles;
+ DMI.Mod = MI.getModuleName();
+ DMI.Obj = MI.getObjFileName();
+ if (opts::pdb2yaml::DbiModuleSourceFileInfo) {
+ auto Files = Modules.source_files(I);
+ DMI.SourceFiles.assign(Files.begin(), Files.end());
+ }
- uint16_t ModiStream = MI.Info.getModuleStreamIndex();
+ uint16_t ModiStream = MI.getModuleStreamIndex();
if (ModiStream == kInvalidStreamIndex)
continue;
auto ModStreamData = msf::MappedBlockStream::createIndexedStream(
File.getMsfLayout(), File.getMsfBuffer(), ModiStream);
- pdb::ModuleDebugStreamRef ModS(MI.Info, std::move(ModStreamData));
+ pdb::ModuleDebugStreamRef ModS(MI, std::move(ModStreamData));
if (auto EC = ModS.reload())
return EC;
diff --git a/tools/llvm-readobj/COFFDumper.cpp b/tools/llvm-readobj/COFFDumper.cpp
index 04386875b95a..049af2c4f076 100644
--- a/tools/llvm-readobj/COFFDumper.cpp
+++ b/tools/llvm-readobj/COFFDumper.cpp
@@ -44,6 +44,7 @@
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/COFF.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataExtractor.h"
@@ -70,7 +71,7 @@ class COFFDumper : public ObjDumper {
public:
friend class COFFObjectDumpDelegate;
COFFDumper(const llvm::object::COFFObjectFile *Obj, ScopedPrinter &Writer)
- : ObjDumper(Writer), Obj(Obj), Writer(Writer) {}
+ : ObjDumper(Writer), Obj(Obj), Writer(Writer), TypeDB(100) {}
void printFileHeaders() override;
void printSections() override;
@@ -121,6 +122,10 @@ private:
uint32_t RelocOffset, uint32_t Offset,
StringRef *RelocSym = nullptr);
+ void printResourceDirectoryTable(ResourceSectionRef RSF,
+ const coff_resource_dir_table &Table,
+ StringRef Level);
+
void printBinaryBlockWithRelocs(StringRef Label, const SectionRef &Sec,
StringRef SectionContents, StringRef Block);
@@ -140,6 +145,9 @@ private:
void printDelayImportedSymbols(
const DelayImportDirectoryEntryRef &I,
iterator_range<imported_symbol_iterator> Range);
+ ErrorOr<const coff_resource_dir_entry &>
+ getResourceDirectoryTableEntry(const coff_resource_dir_table &Table,
+ uint32_t Index);
typedef DenseMap<const coff_section*, std::vector<RelocationRef> > RelocMapTy;
@@ -534,6 +542,29 @@ static const EnumEntry<uint8_t> FileChecksumKindNames[] = {
LLVM_READOBJ_ENUM_CLASS_ENT(FileChecksumKind, SHA256),
};
+static const EnumEntry<COFF::ResourceTypeID> ResourceTypeNames[]{
+ {"kRT_CURSOR (ID 1)", COFF::RID_Cursor},
+ {"kRT_BITMAP (ID 2)", COFF::RID_Bitmap},
+ {"kRT_ICON (ID 3)", COFF::RID_Icon},
+ {"kRT_MENU (ID 4)", COFF::RID_Menu},
+ {"kRT_DIALOG (ID 5)", COFF::RID_Dialog},
+ {"kRT_STRING (ID 6)", COFF::RID_String},
+ {"kRT_FONTDIR (ID 7)", COFF::RID_FontDir},
+ {"kRT_FONT (ID 8)", COFF::RID_Font},
+ {"kRT_ACCELERATOR (ID 9)", COFF::RID_Accelerator},
+ {"kRT_RCDATA (ID 10)", COFF::RID_RCData},
+ {"kRT_MESSAGETABLE (ID 11)", COFF::RID_MessageTable},
+ {"kRT_GROUP_CURSOR (ID 12)", COFF::RID_Group_Cursor},
+ {"kRT_GROUP_ICON (ID 14)", COFF::RID_Group_Icon},
+ {"kRT_VERSION (ID 16)", COFF::RID_Version},
+ {"kRT_DLGINCLUDE (ID 17)", COFF::RID_DLGInclude},
+ {"kRT_PLUGPLAY (ID 19)", COFF::RID_PlugPlay},
+ {"kRT_VXD (ID 20)", COFF::RID_VXD},
+ {"kRT_ANICURSOR (ID 21)", COFF::RID_AniCursor},
+ {"kRT_ANIICON (ID 22)", COFF::RID_AniIcon},
+ {"kRT_HTML (ID 23)", COFF::RID_HTML},
+ {"kRT_MANIFEST (ID 24)", COFF::RID_Manifest}};
+
template <typename T>
static std::error_code getSymbolAuxData(const COFFObjectFile *Obj,
COFFSymbolRef Symbol,
@@ -1503,18 +1534,76 @@ void COFFDumper::printCOFFResources() {
error(S.getContents(Ref));
if ((Name == ".rsrc") || (Name == ".rsrc$01")) {
- auto Table =
- reinterpret_cast<const coff_resource_dir_table *>(Ref.data());
- char FormattedTime[20];
- time_t TDS = time_t(Table->TimeDateStamp);
- strftime(FormattedTime, sizeof(FormattedTime), "%Y-%m-%d %H:%M:%S",
- gmtime(&TDS));
- W.printHex("Time/Date Stamp", FormattedTime, Table->TimeDateStamp);
+ ResourceSectionRef RSF(Ref);
+ auto &BaseTable = unwrapOrError(RSF.getBaseTable());
+ printResourceDirectoryTable(RSF, BaseTable, "Type");
+ }
+ if (opts::SectionData)
+ W.printBinaryBlock(Name.str() + " Data", Ref);
+ }
+}
+
+void COFFDumper::printResourceDirectoryTable(
+ ResourceSectionRef RSF, const coff_resource_dir_table &Table,
+ StringRef Level) {
+ W.printNumber("String Name Entries", Table.NumberOfNameEntries);
+ W.printNumber("ID Entries", Table.NumberOfIDEntries);
+
+ char FormattedTime[20] = {};
+ time_t TDS = time_t(Table.TimeDateStamp);
+ strftime(FormattedTime, 20, "%Y-%m-%d %H:%M:%S", gmtime(&TDS));
+
+ // Iterate through level in resource directory tree.
+ for (int i = 0; i < Table.NumberOfNameEntries + Table.NumberOfIDEntries;
+ i++) {
+ auto Entry = unwrapOrError(getResourceDirectoryTableEntry(Table, i));
+ StringRef Name;
+ SmallString<20> IDStr;
+ raw_svector_ostream OS(IDStr);
+ if (i < Table.NumberOfNameEntries) {
+ ArrayRef<UTF16> RawEntryNameString = unwrapOrError(RSF.getEntryNameString(Entry));
+ std::string EntryNameString;
+ if (!llvm::convertUTF16ToUTF8String(RawEntryNameString, EntryNameString))
+ error(object_error::parse_failed);
+ OS << ": ";
+ OS << EntryNameString;
+ } else {
+ if (Level == "Type") {
+ ScopedPrinter Printer(OS);
+ Printer.printEnum("", Entry.Identifier.ID,
+ makeArrayRef(ResourceTypeNames));
+ IDStr = IDStr.slice(0, IDStr.find_first_of(")", 0) + 1);
+ } else {
+ OS << ": (ID " << Entry.Identifier.ID << ")";
+ }
+ }
+ Name = StringRef(IDStr);
+ ListScope ResourceType(W, Level.str() + Name.str());
+ if (Entry.Offset.isSubDir()) {
+ StringRef NextLevel;
+ if (Level == "Name")
+ NextLevel = "Language";
+ else
+ NextLevel = "Name";
+ auto &NextTable = unwrapOrError(RSF.getEntrySubDir(Entry));
+ printResourceDirectoryTable(RSF, NextTable, NextLevel);
+ } else {
+ W.printHex("Time/Date Stamp", FormattedTime, Table.TimeDateStamp);
+ W.printNumber("Major Version", Table.MajorVersion);
+ W.printNumber("Minor Version", Table.MinorVersion);
}
- W.printBinaryBlock(Name.str() + " Data", Ref);
}
}
+ErrorOr<const coff_resource_dir_entry &>
+COFFDumper::getResourceDirectoryTableEntry(const coff_resource_dir_table &Table,
+ uint32_t Index) {
+ if (Index >= (uint32_t)(Table.NumberOfNameEntries + Table.NumberOfIDEntries))
+ return object_error::parse_failed;
+ auto TablePtr = reinterpret_cast<const coff_resource_dir_entry *>(&Table + 1);
+ return TablePtr[Index];
+}
+
void COFFDumper::printStackMap() const {
object::SectionRef StackMapSection;
for (auto Sec : Obj->sections()) {
@@ -1553,7 +1642,7 @@ void llvm::dumpCodeViewMergedTypes(ScopedPrinter &Writer,
TypeBuf.append(Record.begin(), Record.end());
});
- TypeDatabase TypeDB;
+ TypeDatabase TypeDB(CVTypes.records().size());
{
ListScope S(Writer, "MergedTypeStream");
CVTypeDumper CVTD(TypeDB);
@@ -1574,7 +1663,7 @@ void llvm::dumpCodeViewMergedTypes(ScopedPrinter &Writer,
{
ListScope S(Writer, "MergedIDStream");
- TypeDatabase IDDB;
+ TypeDatabase IDDB(IDTable.records().size());
CVTypeDumper CVTD(IDDB);
TypeDumpVisitor TDV(TypeDB, &Writer, opts::CodeViewSubsectionBytes);
TDV.setItemDB(IDDB);
diff --git a/tools/llvm-rtdyld/llvm-rtdyld.cpp b/tools/llvm-rtdyld/llvm-rtdyld.cpp
index 4e1caa0400f1..75345de50280 100644
--- a/tools/llvm-rtdyld/llvm-rtdyld.cpp
+++ b/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -486,10 +486,7 @@ static int checkAllExpressions(RuntimeDyldChecker &Checker) {
return 0;
}
-static std::map<void *, uint64_t>
-applySpecificSectionMappings(RuntimeDyldChecker &Checker) {
-
- std::map<void*, uint64_t> SpecificMappings;
+void applySpecificSectionMappings(RuntimeDyldChecker &Checker) {
for (StringRef Mapping : SpecificSectionMappings) {
@@ -522,10 +519,7 @@ applySpecificSectionMappings(RuntimeDyldChecker &Checker) {
"'.");
Checker.getRTDyld().mapSectionAddress(OldAddr, NewAddr);
- SpecificMappings[OldAddr] = NewAddr;
}
-
- return SpecificMappings;
}
// Scatter sections in all directions!
@@ -554,8 +548,7 @@ static void remapSectionsAndSymbols(const llvm::Triple &TargetTriple,
// Apply any section-specific mappings that were requested on the command
// line.
- typedef std::map<void*, uint64_t> AppliedMappingsT;
- AppliedMappingsT AppliedMappings = applySpecificSectionMappings(Checker);
+ applySpecificSectionMappings(Checker);
// Keep an "already allocated" mapping of section target addresses to sizes.
// Sections whose address mappings aren't specified on the command line will
@@ -563,15 +556,19 @@ static void remapSectionsAndSymbols(const llvm::Triple &TargetTriple,
// minimum separation.
std::map<uint64_t, uint64_t> AlreadyAllocated;
- // Move the previously applied mappings into the already-allocated map.
+ // Move the previously applied mappings (whether explicitly specified on the
+ // command line, or implicitly set by RuntimeDyld) into the already-allocated
+ // map.
for (WorklistT::iterator I = Worklist.begin(), E = Worklist.end();
I != E;) {
WorklistT::iterator Tmp = I;
++I;
- AppliedMappingsT::iterator AI = AppliedMappings.find(Tmp->first);
+ auto LoadAddr = Checker.getSectionLoadAddress(Tmp->first);
- if (AI != AppliedMappings.end()) {
- AlreadyAllocated[AI->second] = Tmp->second;
+ if (LoadAddr &&
+ *LoadAddr != static_cast<uint64_t>(
+ reinterpret_cast<uintptr_t>(Tmp->first))) {
+ AlreadyAllocated[*LoadAddr] = Tmp->second;
Worklist.erase(Tmp);
}
}
diff --git a/tools/obj2yaml/wasm2yaml.cpp b/tools/obj2yaml/wasm2yaml.cpp
index f6b530c41969..cc04b995f667 100644
--- a/tools/obj2yaml/wasm2yaml.cpp
+++ b/tools/obj2yaml/wasm2yaml.cpp
@@ -44,7 +44,24 @@ ErrorOr<WasmYAML::Object *> WasmDumper::dump() {
}
auto CustomSec = make_unique<WasmYAML::CustomSection>();
CustomSec->Name = WasmSec.Name;
- CustomSec->Payload = yaml::BinaryRef(WasmSec.Content);
+ if (CustomSec->Name == "name") {
+ for (const object::SymbolRef& Sym: Obj.symbols()) {
+ uint32_t Flags = Sym.getFlags();
+ // Skip over symbols that come from imports or exports
+ if (Flags &
+ (object::SymbolRef::SF_Global | object::SymbolRef::SF_Undefined))
+ continue;
+ Expected<StringRef> NameOrError = Sym.getName();
+ if (!NameOrError)
+ continue;
+ WasmYAML::NameEntry NameEntry;
+ NameEntry.Name = *NameOrError;
+ NameEntry.Index = Sym.getValue();
+ CustomSec->FunctionNames.push_back(NameEntry);
+ }
+ } else {
+ CustomSec->Payload = yaml::BinaryRef(WasmSec.Content);
+ }
S = std::move(CustomSec);
break;
}
diff --git a/tools/yaml2obj/yaml2wasm.cpp b/tools/yaml2obj/yaml2wasm.cpp
index 55267ce0392d..eed9f2c4039b 100644
--- a/tools/yaml2obj/yaml2wasm.cpp
+++ b/tools/yaml2obj/yaml2wasm.cpp
@@ -27,6 +27,8 @@ public:
WasmWriter(WasmYAML::Object &Obj) : Obj(Obj) {}
int writeWasm(raw_ostream &OS);
int writeRelocSection(raw_ostream &OS, WasmYAML::Section &Sec);
+ int writeNameSection(raw_ostream &OS, WasmYAML::CustomSection &Section);
+
int writeSectionContent(raw_ostream &OS, WasmYAML::CustomSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::TypeSection &Section);
int writeSectionContent(raw_ostream &OS, WasmYAML::ImportSection &Section);
@@ -65,13 +67,13 @@ static int writeUint8(raw_ostream &OS, uint8_t Value) {
return 0;
}
-static int writeStringRef(StringRef &Str, raw_ostream &OS) {
+static int writeStringRef(const StringRef &Str, raw_ostream &OS) {
encodeULEB128(Str.size(), OS);
OS << Str;
return 0;
}
-static int writeLimits(WasmYAML::Limits Lim, raw_ostream &OS) {
+static int writeLimits(const WasmYAML::Limits &Lim, raw_ostream &OS) {
encodeULEB128(Lim.Flags, OS);
encodeULEB128(Lim.Initial, OS);
if (Lim.Flags & wasm::WASM_LIMITS_FLAG_HAS_MAX)
@@ -79,7 +81,7 @@ static int writeLimits(WasmYAML::Limits Lim, raw_ostream &OS) {
return 0;
}
-static int writeInitExpr(wasm::WasmInitExpr InitExpr, raw_ostream &OS) {
+static int writeInitExpr(const wasm::WasmInitExpr &InitExpr, raw_ostream &OS) {
writeUint8(OS, InitExpr.Opcode);
switch (InitExpr.Opcode) {
case wasm::WASM_OPCODE_I32_CONST:
@@ -105,18 +107,42 @@ static int writeInitExpr(wasm::WasmInitExpr InitExpr, raw_ostream &OS) {
return 0;
}
+int WasmWriter::writeNameSection(raw_ostream &OS,
+ WasmYAML::CustomSection &Section) {
+ writeStringRef(Section.Name, OS);
+ if (Section.FunctionNames.size()) {
+ encodeULEB128(wasm::WASM_NAMES_FUNCTION, OS);
+
+ std::string OutString;
+ raw_string_ostream StringStream(OutString);
+
+ encodeULEB128(Section.FunctionNames.size(), StringStream);
+ for (const WasmYAML::NameEntry &NameEntry : Section.FunctionNames) {
+ encodeULEB128(NameEntry.Index, StringStream);
+ writeStringRef(NameEntry.Name, StringStream);
+ }
+
+ StringStream.flush();
+ encodeULEB128(OutString.size(), OS);
+ OS << OutString;
+ }
+ return 0;
+}
+
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::CustomSection &Section) {
- // writeStringRef(Section.Name, OS);
- // encodeULEB128(Section.Payload.binary_size(), OS);
- Section.Payload.writeAsBinary(OS);
+ if (Section.Name == "name") {
+ writeNameSection(OS, Section);
+ } else {
+ Section.Payload.writeAsBinary(OS);
+ }
return 0;
}
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::TypeSection &Section) {
encodeULEB128(Section.Signatures.size(), OS);
- for (auto &Sig : Section.Signatures) {
+ for (const WasmYAML::Signature &Sig : Section.Signatures) {
encodeSLEB128(Sig.Form, OS);
encodeULEB128(Sig.ParamTypes.size(), OS);
for (auto ParamType : Sig.ParamTypes)
@@ -134,7 +160,7 @@ int WasmWriter::writeSectionContent(raw_ostream &OS,
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::ImportSection &Section) {
encodeULEB128(Section.Imports.size(), OS);
- for (auto &Import : Section.Imports) {
+ for (const WasmYAML::Import &Import : Section.Imports) {
writeStringRef(Import.Module, OS);
writeStringRef(Import.Field, OS);
encodeULEB128(Import.Kind, OS);
@@ -166,7 +192,7 @@ int WasmWriter::writeSectionContent(raw_ostream &OS,
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::ExportSection &Section) {
encodeULEB128(Section.Exports.size(), OS);
- for (auto &Export : Section.Exports) {
+ for (const WasmYAML::Export &Export : Section.Exports) {
writeStringRef(Export.Name, OS);
encodeULEB128(Export.Kind, OS);
encodeULEB128(Export.Index, OS);
@@ -193,7 +219,7 @@ int WasmWriter::writeSectionContent(raw_ostream &OS,
int WasmWriter::writeSectionContent(raw_ostream &OS,
WasmYAML::MemorySection &Section) {
encodeULEB128(Section.Memories.size(), OS);
- for (auto &Mem : Section.Memories) {
+ for (const WasmYAML::Limits &Mem : Section.Memories) {
writeLimits(Mem, OS);
}
return 0;
diff --git a/unittests/ADT/APIntTest.cpp b/unittests/ADT/APIntTest.cpp
index bb6cf35fe9e4..5594955e7baf 100644
--- a/unittests/ADT/APIntTest.cpp
+++ b/unittests/ADT/APIntTest.cpp
@@ -2142,4 +2142,23 @@ TEST(APIntTest, sext) {
EXPECT_EQ(63U, i32_neg1.countPopulation());
}
+TEST(APIntTest, multiply) {
+ APInt i64(64, 1234);
+
+ EXPECT_EQ(7006652, i64 * 5678);
+ EXPECT_EQ(7006652, 5678 * i64);
+
+ APInt i128 = APInt::getOneBitSet(128, 64);
+ APInt i128_1234(128, 1234);
+ i128_1234 <<= 64;
+ EXPECT_EQ(i128_1234, i128 * 1234);
+ EXPECT_EQ(i128_1234, 1234 * i128);
+
+ APInt i96 = APInt::getOneBitSet(96, 64);
+ i96 *= ~0ULL;
+ EXPECT_EQ(32U, i96.countLeadingOnes());
+ EXPECT_EQ(32U, i96.countPopulation());
+ EXPECT_EQ(64U, i96.countTrailingZeros());
+}
+
} // end anonymous namespace
diff --git a/unittests/ADT/BitVectorTest.cpp b/unittests/ADT/BitVectorTest.cpp
index ac7429cae36f..faf362abc9d8 100644
--- a/unittests/ADT/BitVectorTest.cpp
+++ b/unittests/ADT/BitVectorTest.cpp
@@ -204,6 +204,11 @@ TYPED_TEST(BitVectorTest, FindOperations) {
EXPECT_EQ(75, A.find_next(13));
EXPECT_EQ(-1, A.find_next(75));
+ EXPECT_EQ(-1, A.find_prev(12));
+ EXPECT_EQ(12, A.find_prev(13));
+ EXPECT_EQ(13, A.find_prev(75));
+ EXPECT_EQ(75, A.find_prev(90));
+
EXPECT_EQ(0, A.find_first_unset());
EXPECT_EQ(99, A.find_last_unset());
EXPECT_EQ(14, A.find_next_unset(11));
@@ -227,6 +232,30 @@ TYPED_TEST(BitVectorTest, FindOperations) {
EXPECT_EQ(-1, A.find_last());
EXPECT_EQ(0, A.find_first_unset());
EXPECT_EQ(99, A.find_last_unset());
+
+ // Also test with a vector that is small enough to fit in 1 word.
+ A.resize(20);
+ A.set(3);
+ A.set(4);
+ A.set(16);
+ EXPECT_EQ(16, A.find_last());
+ EXPECT_EQ(3, A.find_first());
+ EXPECT_EQ(3, A.find_next(1));
+ EXPECT_EQ(4, A.find_next(3));
+ EXPECT_EQ(16, A.find_next(4));
+ EXPECT_EQ(-1, A.find_next(16));
+
+ EXPECT_EQ(-1, A.find_prev(3));
+ EXPECT_EQ(3, A.find_prev(4));
+ EXPECT_EQ(4, A.find_prev(16));
+ EXPECT_EQ(16, A.find_prev(18));
+
+ EXPECT_EQ(0, A.find_first_unset());
+ EXPECT_EQ(19, A.find_last_unset());
+ EXPECT_EQ(5, A.find_next_unset(3));
+ EXPECT_EQ(5, A.find_next_unset(4));
+ EXPECT_EQ(13, A.find_next_unset(12));
+ EXPECT_EQ(17, A.find_next_unset(15));
}
TYPED_TEST(BitVectorTest, CompoundAssignment) {
diff --git a/unittests/Analysis/TargetLibraryInfoTest.cpp b/unittests/Analysis/TargetLibraryInfoTest.cpp
index 598429c968aa..44c141d6a1e9 100644
--- a/unittests/Analysis/TargetLibraryInfoTest.cpp
+++ b/unittests/Analysis/TargetLibraryInfoTest.cpp
@@ -334,6 +334,7 @@ TEST_F(TargetLibraryInfoTest, ValidProto) {
"declare i32 @vsnprintf(i8*, i64, i8*, %struct*)\n"
"declare i32 @vsprintf(i8*, i8*, %struct*)\n"
"declare i32 @vsscanf(i8*, i8*, %struct*)\n"
+ "declare i64 @wcslen(i32*)\n"
// These functions were also extracted from the OS X headers, but they are
// available with a special name on darwin.
diff --git a/unittests/Support/TargetParserTest.cpp b/unittests/Support/TargetParserTest.cpp
index 9465f479fe8c..6be6f7bfb5d5 100644
--- a/unittests/Support/TargetParserTest.cpp
+++ b/unittests/Support/TargetParserTest.cpp
@@ -611,48 +611,58 @@ bool testAArch64CPU(StringRef CPUName, StringRef ExpectedArch,
TEST(TargetParserTest, testAArch64CPU) {
EXPECT_TRUE(testAArch64CPU(
"invalid", "invalid", "invalid",
- AArch64::AEK_INVALID, ""));
+ AArch64::AEK_NONE, ""));
EXPECT_TRUE(testAArch64CPU(
"generic", "invalid", "none",
AArch64::AEK_NONE, ""));
EXPECT_TRUE(testAArch64CPU(
"cortex-a35", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"cortex-a53", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"cortex-a57", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"cortex-a72", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"cortex-a73", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"cyclone", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"exynos-m1", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"exynos-m2", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"exynos-m3", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"falkor", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"kryo", "armv8-a", "crypto-neon-fp-armv8",
- AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD, "8-A"));
+ AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
+ AArch64::AEK_SIMD, "8-A"));
EXPECT_TRUE(testAArch64CPU(
"thunderx2t99", "armv8.1-a", "crypto-neon-fp-armv8",
AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_LSE |
- AArch64::AEK_SIMD, "8.1-A"));
+ AArch64::AEK_FP | AArch64::AEK_SIMD, "8.1-A"));
EXPECT_TRUE(testAArch64CPU(
"thunderx", "armv8-a", "crypto-neon-fp-armv8",
AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_SIMD |
diff --git a/utils/release/test-release.sh b/utils/release/test-release.sh
index b0c771579802..b597d5f45ad8 100755
--- a/utils/release/test-release.sh
+++ b/utils/release/test-release.sh
@@ -568,17 +568,17 @@ done
) 2>&1 | tee $LogDir/testing.$Release-$RC.log
+if [ "$use_gzip" = "yes" ]; then
+ echo "# Packaging the release as $Package.tar.gz"
+else
+ echo "# Packaging the release as $Package.tar.xz"
+fi
package_release
set +e
# Woo hoo!
echo "### Testing Finished ###"
-if [ "$use_gzip" = "yes" ]; then
- echo "### Package: $Package.tar.gz"
-else
- echo "### Package: $Package.tar.xz"
-fi
echo "### Logs: $LogDir"
echo "### Errors:"
diff --git a/utils/unittest/googletest/README.LLVM b/utils/unittest/googletest/README.LLVM
index 06c80fea0670..99d0bc5b7ae0 100644
--- a/utils/unittest/googletest/README.LLVM
+++ b/utils/unittest/googletest/README.LLVM
@@ -16,5 +16,5 @@ $ rm -f src/gtest_main.cc
$ mv LICENSE LICENSE.TXT
Modified as follows:
-* Added support for Minix and Haiku.
+* Added support for NetBSD, Minix and Haiku.
* Added raw_os_ostream support to include/gtest/internal/custom/gtest-printers.h.
diff --git a/utils/unittest/googletest/include/gtest/internal/gtest-port-arch.h b/utils/unittest/googletest/include/gtest/internal/gtest-port-arch.h
index a375b73799b7..f1319c7f2e29 100644
--- a/utils/unittest/googletest/include/gtest/internal/gtest-port-arch.h
+++ b/utils/unittest/googletest/include/gtest/internal/gtest-port-arch.h
@@ -84,6 +84,8 @@
# define GTEST_OS_HPUX 1
#elif defined __native_client__
# define GTEST_OS_NACL 1
+#elif defined __NetBSD__
+# define GTEST_OS_NETBSD 1
#elif defined __OpenBSD__
# define GTEST_OS_OPENBSD 1
#elif defined __QNX__
diff --git a/utils/unittest/googletest/include/gtest/internal/gtest-port.h b/utils/unittest/googletest/include/gtest/internal/gtest-port.h
index 8762f974ef8d..d36e8203bcc3 100644
--- a/utils/unittest/googletest/include/gtest/internal/gtest-port.h
+++ b/utils/unittest/googletest/include/gtest/internal/gtest-port.h
@@ -793,7 +793,7 @@ using ::std::tuple_size;
(GTEST_OS_MAC && !GTEST_OS_IOS) || \
(GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
- GTEST_OS_OPENBSD || GTEST_OS_QNX || GTEST_OS_FREEBSD)
+ GTEST_OS_OPENBSD || GTEST_OS_QNX || GTEST_OS_FREEBSD || GTEST_OS_NETBSD)
# define GTEST_HAS_DEATH_TEST 1
#endif